ARM: dma: replace ISA_DMA_THRESHOLD with a variable
ISA_DMA_THRESHOLD has been unused by non-arch code, so let's now get rid of it from ARM by replacing it with a variable, arm_dma_limit. Move dma_supported() and dma_set_mask() out of line, and have dma_supported() check this new variable instead.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
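For context, a hedged sketch of how a driver exercises the path this patch moves out of line (hypothetical driver code, not part of this patch): dma_set_mask() now calls the exported dma_supported(), which compares the requested mask against arm_dma_limit instead of the old ISA_DMA_THRESHOLD constant.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: try full 32-bit DMA first, then fall
 * back to a 24-bit mask; dma_set_mask() returns -EIO when
 * dma_supported() finds the mask smaller than arm_dma_limit.
 */
static int example_probe(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(32)) &&
	    dma_set_mask(dev, DMA_BIT_MASK(24)))
		return -EIO;	/* no usable DMA mask */

	return 0;
}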
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,33 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-#ifndef CONFIG_DMABOUNCE
-	*dev->dma_mask = dma_mask;
-#endif
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
 #endif
 
-/*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA. The default here places no restriction on DMA
- * allocations. This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,9 +25,11 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include "mm.h"
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-	u64 mask = ISA_DMA_THRESHOLD;
+	u64 mask = (u64)arm_dma_limit;
 
 	if (dev) {
 		mask = dev->coherent_dma_mask;
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		if ((~mask) & ISA_DMA_THRESHOLD) {
+		if ((~mask) & (u64)arm_dma_limit) {
 			dev_warn(dev, "coherent DMA mask %#llx is smaller "
 				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+				 mask, (u64)arm_dma_limit);
 			return 0;
 		}
 	}
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+	if (mask < (u64)arm_dma_limit)
+		return 0;
+	return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+	*dev->dma_mask = dma_mask;
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
 static int __init dma_debug_do_init(void)
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_ZONE_DMA
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
 {
@@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	 */
 	arm_adjust_dma_zone(zone_size, zhole_size,
 		ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+
+	arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1;
 #endif
 
 	free_area_init_node(0, zone_size, min, zhole_size);
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
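Usage note (illustrative only, not part of this patch): without CONFIG_ZONE_DMA, mm.h defines arm_dma_limit as ((u32)~0), i.e. no restriction; with CONFIG_ZONE_DMA, arm_bootmem_free() derives it from ARM_DMA_ZONE_SIZE as shown in the init.c hunk above. A hypothetical platform header fragment that would produce a restricted limit might look like:

/* Hypothetical mach/memory.h fragment: limit the DMA zone to the first
 * 64MB of RAM; arm_dma_limit then becomes
 * PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1.
 */
#define ARM_DMA_ZONE_SIZE	SZ_64M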