b43: replace the ssb_dma API with the generic DMA API
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Stefano Brivio <stefano.brivio@polimi.it>
Cc: John W. Linville <linville@tuxdriver.com>
Acked-by: Michael Buesch <mb@bu3sch.de>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
commit 718e8898af
parent 4e8031328b
committed by John W. Linville
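
The ssb layer used to provide its own thin DMA wrappers (ssb_dma_map_single() and friends). This patch converts b43 to call the generic DMA API directly, replacing every struct ssb_device argument with its dma_dev member, a plain struct device pointer that the generic API operates on. A minimal sketch of the recurring substitution, with the helper name map_buf invented purely for illustration:

	/* Sketch of the pattern applied in every hunk below:
	 *   ssb_dma_xxx(dev->dev, ...)  ->  dma_xxx(dev->dev->dma_dev, ...)
	 * map_buf is a hypothetical helper, not part of the patch. */
	static dma_addr_t map_buf(struct b43_dmaring *ring, void *buf, size_t len)
	{
		/* was: ssb_dma_map_single(ring->dev->dev, buf, len, DMA_TO_DEVICE); */
		return dma_map_single(ring->dev->dev->dma_dev, buf, len,
				      DMA_TO_DEVICE);
	}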
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					     buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					     buf, len, DMA_FROM_DEVICE);
 	}
 
 	return dmaaddr;
@@ -348,11 +348,11 @@ static inline
 		      dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				     addr, len, DMA_TO_DEVICE);
 	} else {
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				     addr, len, DMA_FROM_DEVICE);
 	}
 }
 
@@ -361,7 +361,7 @@ static inline
 			      dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,8 +370,8 @@ static inline
 				 dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				       addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 					    B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
 
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
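
Note on the free_ringmemory() hunk above: ssb_dma_free_consistent() took a gfp flags argument, but the generic dma_free_coherent() does not (allocation flags only matter at allocation time), so the trailing flags argument is dropped rather than translated. For reference, the generic coherent pair is:

	/* Generic coherent DMA allocation pair; gfp flags appear only on
	 * the allocation side. */
	void *dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
	void dma_free_coherent(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_handle);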
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
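
The hunk above keeps the driver's own b43_dma_mapping_error() wrapper, which additionally range-checks the returned address against the ring type, and only swaps the underlying primitive. The generic helper it now uses is:

	/* Returns nonzero if the handle returned by dma_map_single() or
	 * dma_map_page() is an error and must not be used. */
	int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);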
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					      ring->txhdr_cache,
 					      b43_txhdr_size(dev),
 					      DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
 					  b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 						      ring->txhdr_cache,
 						      b43_txhdr_size(dev),
 						      DMA_TO_DEVICE);
 
 			if (b43_dma_mapping_error(ring, dma_test,
 						  b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		ssb_dma_unmap_single(dev->dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				     dma_test, b43_txhdr_size(dev),
 				     DMA_TO_DEVICE);
 	}
 
 	err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
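
The mask hunk is the one place where the conversion is more than a rename: the generic API tracks the streaming and coherent DMA masks separately, so the fallback loop now sets both and only breaks out once both calls succeed. On later kernels (3.13 and up) the same fallback could be written with the combined helper; a sketch only, not part of this patch:

	/* Hypothetical rewrite using dma_set_mask_and_coherent(), which
	 * sets the streaming and coherent masks in one call. */
	if (dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(32)))
		return -EOPNOTSUPP;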