sparc: Make SBUS DMA interfaces take struct device.
This is the first step in converting all the SBUS drivers over to
generic dma_*().

Signed-off-by: David S. Miller <davem@davemloft.net>
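For illustration, a minimal sketch of how a driver call site changes under
the new signatures ('buf', 'len', and 'mapping' are hypothetical driver
variables; the actual driver conversions follow in later patches):

	dma_addr_t mapping;

	/* Old interface: pass the bus-specific struct sbus_dev. */
	mapping = sbus_map_single(sdev, buf, len, SBUS_DMA_BIDIRECTIONAL);

	/* New interface: pass the generic struct device embedded in the
	 * sbus_dev's of_device -- the same handle the underlying
	 * dma_map_single() expects, which is what makes the eventual
	 * switch to calling dma_*() directly mechanical.
	 */
	mapping = sbus_map_single(&sdev->ofdev.dev, buf, len,
				  SBUS_DMA_BIDIRECTIONAL);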
@@ -109,8 +109,8 @@ extern void sbus_set_sbus64(struct sbus_dev *, int);
 extern void sbus_fill_device_irq(struct sbus_dev *);
 
 /* These yield IOMMU mappings in consistent mode. */
-extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
-extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
+extern void *sbus_alloc_consistent(struct device *, long, u32 *dma_addrp);
+extern void sbus_free_consistent(struct device *, long, void *, u32);
 void prom_adjust_ranges(struct linux_prom_ranges *, int,
 			struct linux_prom_ranges *, int);
 
@@ -120,18 +120,14 @@ void prom_adjust_ranges(struct linux_prom_ranges *, int,
 #define SBUS_DMA_NONE DMA_NONE
 
 /* All the rest use streaming mode mappings. */
-extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
-extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
-extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
-extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
+extern dma_addr_t sbus_map_single(struct device *, void *, size_t, int);
+extern void sbus_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int sbus_map_sg(struct device *, struct scatterlist *, int, int);
+extern void sbus_unmap_sg(struct device *, struct scatterlist *, int, int);
 
 /* Finally, allow explicit synchronization of streamable mappings. */
-extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
-#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
-extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
-extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
-#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
-extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
+extern void sbus_dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, int);
+extern void sbus_dma_sync_single_for_device(struct device *, dma_addr_t, size_t, int);
 
 /* Eric Brower (ebrower@usa.net)
  * Translate SBus interrupt levels to ino values--
@@ -100,17 +100,16 @@ extern struct sbus_bus *sbus_root;
 extern void sbus_set_sbus64(struct sbus_dev *, int);
 extern void sbus_fill_device_irq(struct sbus_dev *);
 
-static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
+static inline void *sbus_alloc_consistent(struct device *dev , size_t size,
 					  dma_addr_t *dma_handle)
 {
-	return dma_alloc_coherent(&sdev->ofdev.dev, size,
-				  dma_handle, GFP_ATOMIC);
+	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
 }
 
-static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
+static inline void sbus_free_consistent(struct device *dev, size_t size,
 					void *vaddr, dma_addr_t dma_handle)
 {
-	return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
+	return dma_free_coherent(dev, size, vaddr, dma_handle);
 }
 
 #define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
@@ -119,68 +118,51 @@ static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
 #define SBUS_DMA_NONE DMA_NONE
 
 /* All the rest use streaming mode mappings. */
-static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
+static inline dma_addr_t sbus_map_single(struct device *dev, void *ptr,
 					 size_t size, int direction)
 {
-	return dma_map_single(&sdev->ofdev.dev, ptr, size,
+	return dma_map_single(dev, ptr, size,
 			      (enum dma_data_direction) direction);
 }
 
-static inline void sbus_unmap_single(struct sbus_dev *sdev,
+static inline void sbus_unmap_single(struct device *dev,
 				     dma_addr_t dma_addr, size_t size,
 				     int direction)
 {
-	dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
+	dma_unmap_single(dev, dma_addr, size,
 			 (enum dma_data_direction) direction);
 }
 
-static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
+static inline int sbus_map_sg(struct device *dev, struct scatterlist *sg,
 			      int nents, int direction)
 {
-	return dma_map_sg(&sdev->ofdev.dev, sg, nents,
+	return dma_map_sg(dev, sg, nents,
 			  (enum dma_data_direction) direction);
 }
 
-static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
+static inline void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
 				 int nents, int direction)
 {
-	dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
+	dma_unmap_sg(dev, sg, nents,
 		     (enum dma_data_direction) direction);
 }
 
 /* Finally, allow explicit synchronization of streamable mappings. */
-static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
+static inline void sbus_dma_sync_single_for_cpu(struct device *dev,
 						dma_addr_t dma_handle,
 						size_t size, int direction)
 {
-	dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
+	dma_sync_single_for_cpu(dev, dma_handle, size,
 				(enum dma_data_direction) direction);
 }
-#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
 
-static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
+static inline void sbus_dma_sync_single_for_device(struct device *dev,
 						   dma_addr_t dma_handle,
 						   size_t size, int direction)
 {
 	/* No flushing needed to sync cpu writes to the device. */
 }
 
-static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
-					    struct scatterlist *sg,
-					    int nents, int direction)
-{
-	dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
-			    (enum dma_data_direction) direction);
-}
-#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
-
-static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
-					       struct scatterlist *sg,
-					       int nents, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
 extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
 extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
 extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
@@ -300,11 +300,10 @@ void __init sbus_fill_device_irq(struct sbus_dev *sdev)
  * Allocate a chunk of memory suitable for DMA.
  * Typically devices use them for control blocks.
  * CPU may access them without any explicit flushing.
- *
- * XXX Some clever people know that sdev is not used and supply NULL. Watch.
  */
-void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
+void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
 {
+	struct of_device *op = to_of_device(dev);
 	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
 	unsigned long va;
 	struct resource *res;
@@ -341,10 +340,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
 	if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
 		goto err_noiommu;
 
-	/* Set the resource name, if known. */
-	if (sdev) {
-		res->name = sdev->prom_name;
-	}
+	res->name = op->node->name;
 
 	return (void *)(unsigned long)res->start;
 
@@ -358,7 +354,7 @@ err_nopages:
 	return NULL;
 }
 
-void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
+void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -396,8 +392,10 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
  * CPU view of this memory may be inconsistent with
  * a device view and explicit flushing is necessary.
  */
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
+dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	/* XXX why are some lengths signed, others unsigned? */
 	if (len <= 0) {
 		return 0;
@@ -409,13 +407,16 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int dire
 	return mmu_get_scsi_one(va, len, sdev->bus);
 }
 
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
+void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	mmu_release_scsi_one(ba, n, sdev->bus);
 }
 
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	mmu_get_scsi_sgl(sg, n, sdev->bus);
 
 	/*
@@ -425,16 +426,19 @@ int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direct
 	return n;
 }
 
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
+void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
 {
+	struct sbus_dev *sdev = to_sbus_device(dev);
+
 	mmu_release_scsi_sgl(sg, n, sdev->bus);
 }
 
 /*
  */
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
 {
 #if 0
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	unsigned long va;
 	struct resource *res;
@@ -452,9 +456,10 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t s
 #endif
 }
 
-void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction)
+void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
 {
 #if 0
+	struct sbus_dev *sdev = to_sbus_device(dev);
 	unsigned long va;
 	struct resource *res;
@@ -472,16 +477,6 @@ void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_
 #endif
 }
 
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n");
-}
-
-void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
-{
-	printk("sbus_dma_sync_sg_for_device: not implemented yet\n");
-}
-
 /* Support code for sbus_init(). */
 /*
  * XXX This functions appears to be a distorted version of
@@ -163,8 +163,6 @@ EXPORT_SYMBOL(sbus_map_sg);
 EXPORT_SYMBOL(sbus_unmap_sg);
 EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
 EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
-EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
 EXPORT_SYMBOL(sbus_iounmap);
 EXPORT_SYMBOL(sbus_ioremap);
 #endif