ARM: dma-mapping: push buffer ownership down into dma-mapping.c
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -56,47 +56,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
-/*
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-		size_t size, int rw);
-
 /*
  * The DMA API is built upon the notion of "buffer ownership". A buffer
  * is either exclusively owned by the CPU (and therefore may be accessed
  * by it) or exclusively owned by the DMA device. These helper functions
  * represent the transitions between these two ownership states.
  *
- * As above, these are private support functions and not part of the API.
- * Drivers must not use these.
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
  */
 static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
 	if (!arch_is_coherent())
-		dma_cache_maint(kaddr, size, dir);
+		___dma_single_cpu_to_dev(kaddr, size, dir);
 }
 
 static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
-	/* nothing to do */
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
 }
 
 static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
 	if (!arch_is_coherent())
-		dma_cache_maint_page(page, off, size, dir);
+		___dma_page_cpu_to_dev(page, off, size, dir);
 }
 
 static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
 /*
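The comment block introduced above is the driver-facing contract: a streaming
buffer must be handed to the device before DMA starts and handed back to the
CPU before the driver touches the data again. A minimal sketch of those two
transitions as seen through the public streaming-DMA API (the dev, buf and
len names here are hypothetical; only the dma_* calls are the real kernel
interface):

	#include <linux/dma-mapping.h>

	/* CPU -> device: mapping performs the needed cache maintenance */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle', wait for completion ... */

	/* device -> CPU: ownership returns to the CPU on unmap */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);

Those public entry points funnel into the __dma_single_cpu_to_dev() and
__dma_single_dev_to_cpu() wrappers patched above; the dev_to_cpu side is
where the delayed cache invalidation will eventually live.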
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void dma_cache_maint(const void *start, size_t size, int direction)
+static void dma_cache_maint(const void *start, size_t size, int direction)
 {
 	void (*inner_op)(const void *, const void *);
 	void (*outer_op)(unsigned long, unsigned long);
@@ -431,7 +431,20 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 	inner_op(start, start + size);
 	outer_op(__pa(start), __pa(start) + size);
 }
-EXPORT_SYMBOL(dma_cache_maint);
+
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	dma_cache_maint(kaddr, size, dir);
+}
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
+
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
 	size_t size, int direction)
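The out-of-line ___dma_single_* functions above are exported so the inline
wrappers in dma-mapping.h can reach them from any caller, including modules;
hence the EXPORT_SYMBOL() even on the no-op variant. The wrappers find them
through block-scope extern declarations, which keeps the prototypes out of
the header's public namespace. A standalone illustration of that C idiom,
with toy names not taken from the kernel:

	/* impl.c: the out-of-line implementation */
	void do_maintenance(int arg)
	{
		/* real cache maintenance would happen here */
	}

	/* header.h: inline wrapper visible to all users */
	static inline void maybe_do_maintenance(int arg)
	{
		extern void do_maintenance(int);	/* scoped to this function */

		if (arg)
			do_maintenance(arg);
	}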
@@ -474,7 +487,7 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
 	outer_op(paddr, paddr + size);
 }
 
-void dma_cache_maint_page(struct page *page, unsigned long offset,
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, int dir)
 {
 	/*
@@ -499,7 +512,20 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
 		left -= len;
 	} while (left);
 }
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint_page(page, off, size, dir);
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
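For reference, the direction-to-cache-operation mapping that the now-static
dma_cache_maint() applies, reachable only through the new ___dma_* entry
points, follows the usual ARM convention. A condensed sketch of the
selection logic, using the dmac_*/outer_* range operations as they were
named in the ARM tree of this era:

	switch (direction) {
	case DMA_FROM_DEVICE:		/* device writes, CPU will read */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* CPU wrote, device will read */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* clean and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

Once the speculative-prefetch handling described in the new header comment
lands, the invalidate half of this moves to the dev_to_cpu ("transfer
complete") side; that is why this patch already adds ___dma_single_dev_to_cpu()
and ___dma_page_dev_to_cpu() even though they are no-ops for now.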