iwlagn: don't use the PCI wrappers for DMA operation
Get a pointer to the struct device during probe and get rid of all the
PCI specific DMA wrappers.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
commit 795414db86
parent 084dd79172
committed by Wey-Yi Guy
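Background for reviewers: this conversion is mechanical because the pci_*() DMA helpers are thin shims over the generic DMA API, and the direction flags map 1:1 (PCI_DMA_TODEVICE -> DMA_TO_DEVICE, PCI_DMA_FROMDEVICE -> DMA_FROM_DEVICE, PCI_DMA_BIDIRECTIONAL -> DMA_BIDIRECTIONAL). A sketch of one such shim, written from memory of include/asm-generic/pci-dma-compat.h (the exact body in the tree this commit targets may differ slightly):

/* pci_map_page() just forwards to dma_map_page() on the embedded
 * struct device, so calling dma_map_page(priv->bus.dev, ...) directly
 * is behavior-preserving once the device pointer is cached at probe. */
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
	     unsigned long offset, size_t size, int direction)
{
	return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev,
			    page, offset, size,
			    (enum dma_data_direction)direction);
}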
@@ -639,9 +639,9 @@ void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
 		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+			dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
 				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 			__iwl_free_pages(priv, rxq->pool[i].page);
 			rxq->pool[i].page = NULL;
 		}
@@ -913,9 +913,9 @@ void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		BUG_ON(rxb->page);
 		rxb->page = page;
 		/* Get physical address of the RB */
-		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+		rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
 				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
@@ -958,9 +958,9 @@ void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
 		if (rxq->pool[i].page != NULL) {
-			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+			dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
 				PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 			__iwl_free_pages(priv, rxq->pool[i].page);
 			rxq->pool[i].page = NULL;
 		}
@@ -716,10 +716,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev,
+	txcmd_phys = dma_map_single(priv->bus.dev,
				    &out_cmd->hdr, firstlen,
-				    PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
+				    DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(priv->bus.dev, txcmd_phys)))
 		goto drop_unlock_sta;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 	dma_unmap_len_set(out_meta, len, firstlen);
@@ -735,13 +735,13 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	 * if any (802.11 null frames have no payload). */
 	secondlen = skb->len - hdr_len;
 	if (secondlen > 0) {
-		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-					   secondlen, PCI_DMA_TODEVICE);
-		if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
-			pci_unmap_single(priv->pci_dev,
+		phys_addr = dma_map_single(priv->bus.dev, skb->data + hdr_len,
+					   secondlen, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
+			dma_unmap_single(priv->bus.dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
-					 PCI_DMA_BIDIRECTIONAL);
+					 DMA_BIDIRECTIONAL);
 			goto drop_unlock_sta;
 		}
 	}
@@ -764,8 +764,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
			offsetof(struct iwl_tx_cmd, scratch);
 
 	/* take back ownership of DMA buffer to enable update */
-	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
-				    firstlen, PCI_DMA_BIDIRECTIONAL);
+	dma_sync_single_for_cpu(priv->bus.dev, txcmd_phys, firstlen,
+				DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
@@ -780,8 +780,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));
 
-	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
-				       firstlen, PCI_DMA_BIDIRECTIONAL);
+	dma_sync_single_for_device(priv->bus.dev, txcmd_phys, firstlen,
+				   DMA_BIDIRECTIONAL);
 
 	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
@@ -848,8 +848,7 @@ static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
 	if (unlikely(!ptr->addr))
 		return;
 
-	dma_free_coherent(priv->bus.dev,
-			  ptr->size, ptr->addr, ptr->dma);
+	dma_free_coherent(priv->bus.dev, ptr->size, ptr->addr, ptr->dma);
 	memset(ptr, 0, sizeof(*ptr));
 }
 
@@ -495,9 +495,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+		dma_unmap_page(priv->bus.dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
-			       PCI_DMA_FROMDEVICE);
+			       DMA_FROM_DEVICE);
 		pkt = rxb_addr(rxb);
 
 		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
@@ -579,9 +579,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
-			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+			rxb->page_dma = dma_map_page(priv->bus.dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
@@ -128,7 +128,6 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
 static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd)
 {
-	struct pci_dev *dev = priv->pci_dev;
 	int i;
 	int num_tbs;
 
@@ -143,15 +142,15 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
 
 	/* Unmap tx_cmd */
 	if (num_tbs)
-		pci_unmap_single(dev,
+		dma_unmap_single(priv->bus.dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
-				 PCI_DMA_BIDIRECTIONAL);
+				 DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
-		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-				 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+		dma_unmap_single(priv->bus.dev, iwl_tfd_tb_get_addr(tfd, i),
+				 iwl_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
 }
 
 /**
@@ -310,10 +309,10 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
		i = get_cmd_index(q, q->read_ptr);
 
		if (txq->meta[i].flags & CMD_MAPPED) {
-			pci_unmap_single(priv->pci_dev,
+			dma_unmap_single(priv->bus.dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
-					 PCI_DMA_BIDIRECTIONAL);
+					 DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}
 
@@ -456,7 +455,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
 	if (!txq->tfds) {
-		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
+		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
 		goto error;
 	}
 	txq->q.id = id;
@@ -677,9 +676,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);
 
-	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-				   copy_size, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+	phys_addr = dma_map_single(priv->bus.dev, &out_cmd->hdr, copy_size,
+				   DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
 		idx = -ENOMEM;
 		goto out;
 	}
@@ -699,9 +698,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
-		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
-					   cmd->len[i], PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
+		phys_addr = dma_map_single(priv->bus.dev, (void *)cmd->data[i],
+					   cmd->len[i], DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->bus.dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
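For context, "get a pointer to the struct device during probe" amounts to caching &pdev->dev once; every hunk above then goes through that cached pointer. A minimal sketch under stated assumptions — the probe function name and allocation helper are illustrative, only the priv->bus.dev field is taken from the diff:

/* Hedged sketch, not part of this commit's diff: cache the generic
 * struct device at PCI probe time so the core driver code can use the
 * dma_*() API without knowing it sits on a PCI bus. */
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct iwl_priv *priv = iwl_alloc_priv();	/* hypothetical helper */

	if (!priv)
		return -ENOMEM;

	/* All dma_map/unmap/sync calls in the hunks above use this. */
	priv->bus.dev = &pdev->dev;

	/* ... enable the device, set the DMA mask, map BARs, etc. ... */
	return 0;
}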