Merge branch 'fixes_for_linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "This consists mainly of a set of one-liner fixes and cleanups for a
  few minor issues identified in both Contiguous Memory Allocator code
  and ARM DMA-mapping subsystem."

* 'fixes_for_linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: mm: Remove unused arm_vmregion priv field
  ARM: dma-mapping: fix build warning in __dma_alloc()
  ARM: dma-mapping: support debug_dma_mapping_error
  mm: cma: alloc_contig_range: return early for err path
  drivers: cma: Fix wrong CMA selected region size default value
  drivers: dma-coherent: Fix typo in dma_mmap_from_coherent documentation
  drivers: dma-contiguous: Don't redefine SZ_1M
commit cff7b8ba60
Author: Linus Torvalds
Date:   2012-10-25 15:57:48 -07:00

7 changed files with 7 additions and 11 deletions

arch/arm/include/asm/dma-mapping.h

@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);
return dma_addr == DMA_ERROR_CODE;
}
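
The debug_dma_mapping_error() hook added above lets CONFIG_DMA_API_DEBUG record that a driver actually checked a mapping for failure before using it. Below is a minimal, hypothetical driver-side sketch of the pattern this tracks; foo_map_buffer(), buf and len are illustrative and not part of this commit:

#include <linux/dma-mapping.h>

/*
 * Map a buffer and check the result; with DMA_API_DEBUG enabled, the
 * dma_mapping_error() call is now also reported to the debug core.
 */
static int foo_map_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}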

arch/arm/mm/dma-mapping.c

@@ -610,7 +610,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
-	struct page *page;
+	struct page *page = NULL;
 	void *addr;
 #ifdef CONFIG_DMA_API_DEBUG

arch/arm/mm/vmregion.h

@@ -17,7 +17,6 @@ struct arm_vmregion {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
-	void			*priv;
 	int			vm_active;
 	const void		*caller;
 };

drivers/base/Kconfig

@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
 choice
 	prompt "Selected region size"
-	default CMA_SIZE_SEL_ABSOLUTE
+	default CMA_SIZE_SEL_MBYTES
 config CMA_SIZE_SEL_MBYTES
 	bool "Use mega bytes value only"

drivers/base/dma-coherent.c

@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
  */
 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *vaddr, size_t size, int *ret)
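
As the reworded comment notes, dma_mmap_from_coherent() only reports whether the per-device pool satisfied the request; the caller keeps the fallback logic. Below is a minimal sketch of that contract, with the generic-pool path left as a stub; foo_dma_mmap() and the stub return value are illustrative, not from this commit:

#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Hypothetical mmap helper showing the documented 1/0 return contract. */
static int foo_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, size_t size)
{
	int ret;

	/* Returns 1: the per-device coherent pool mapped it, ret holds the result. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Returns 0: map from the generic pools instead (arch-specific, stubbed here). */
	return -ENXIO;
}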

drivers/base/dma-contiguous.c

@@ -27,15 +27,12 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/page-isolation.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-#ifndef SZ_1M
-#define SZ_1M (1 << 20)
-#endif
 struct cma {
 	unsigned long base_pfn;
 	unsigned long count;

mm/page_alloc.c

@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype);
 	if (ret)
-		goto done;
+		return ret;
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret)