MIPS: use the generic uncached segment support in dma-direct

Stop providing the arch alloc/free hooks and just expose the segment
offset instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Paul Burton <paul.burton@mips.com>
Christoph Hellwig, 2019-04-28 13:57:39 -05:00
parent c30700db9e
commit 2e96e04d25
4 changed files with 11 additions and 27 deletions
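
With ARCH_HAS_UNCACHED_SEGMENT selected, the cache writeback and the switch to
the uncached alias happen in the generic dma-direct allocator (the generic
support this commit switches to) rather than in a MIPS-specific
arch_dma_alloc(). A minimal sketch of that allocation path, with the page
allocation simplified, dev_is_dma_coherent() assumed as the coherence check,
and the sketch_* name made up here; the real code lives in kernel/dma/direct.c
and handles many more cases:

#include <linux/dma-direct.h>		/* phys_to_dma() */
#include <linux/dma-mapping.h>		/* dma_addr_t, DMA_ATTR_NON_CONSISTENT */
#include <linux/dma-noncoherent.h>	/* dev_is_dma_coherent(), uncached-segment hooks */
#include <linux/gfp.h>
#include <linux/mm.h>			/* page_address() */

/* Sketch only -- not the actual dma_direct_alloc_pages() implementation. */
static void *sketch_dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page = alloc_pages(gfp, get_order(size));
	void *ret;

	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	ret = page_address(page);

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
		arch_dma_prep_coherent(page, size);	/* write back dirty lines */
		ret = uncached_kernel_address(ret);	/* hand out the uncached alias */
	}
	return ret;
}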

arch/mips/Kconfig

@@ -8,6 +8,7 @@ config MIPS
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAS_UNCACHED_SEGMENT
 	select ARCH_SUPPORTS_UPROBES
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT

arch/mips/include/asm/page.h

@ -258,9 +258,6 @@ extern int __virt_addr_valid(const volatile void *kaddr);
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define UNCAC_ADDR(addr) (UNCAC_BASE + __pa(addr))
#define CAC_ADDR(addr) ((unsigned long)__va((addr) - UNCAC_BASE))
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
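
The removed macros are pure address arithmetic between the cached linear
mapping and the uncached segment; the same translation reappears below as the
uncached_kernel_address()/cached_kernel_address() helpers that the generic
code calls. A worked example of that arithmetic, assuming the usual MIPS32
layout (cached KSEG0 at 0x80000000, UNCAC_BASE/KSEG1 at 0xa0000000), which is
not spelled out in the patch itself:

/*
 * cached kernel address:   0x80400000
 * __pa(addr):              0x00400000
 * UNCAC_ADDR(addr):        0xa0000000 + 0x00400000 = 0xa0400000  (uncached alias)
 * CAC_ADDR(0xa0400000):    __va(0xa0400000 - 0xa0000000) = 0x80400000
 *
 * Both addresses name the same physical memory; only the cacheability of
 * accesses through them differs.
 */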

arch/mips/jazz/jazzdma.c

@@ -575,10 +575,6 @@ static void *jazz_dma_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT)) {
-		dma_cache_wback_inv((unsigned long)ret, size);
-		ret = (void *)UNCAC_ADDR(ret);
-	}
 
 	return ret;
 }
 
@@ -586,8 +582,6 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	vdma_free(dma_handle);
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
 	dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
 }
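
jazz_dma_free() can drop its CAC_ADDR() step because the generic free path is
expected to translate the uncached alias back to the linear mapping before
releasing the pages. A free-side sketch under the same assumptions as above
(the sketch_* name is made up; the real code is dma_direct_free_pages() in
kernel/dma/direct.c):

#include <linux/dma-mapping.h>		/* dma_addr_t, DMA_ATTR_NON_CONSISTENT */
#include <linux/dma-noncoherent.h>	/* dev_is_dma_coherent(), cached_kernel_address() */
#include <linux/gfp.h>			/* free_pages(), get_order() */

/* Sketch only -- mirrors what the generic free path is assumed to do. */
static void sketch_dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
		cpu_addr = cached_kernel_address(cpu_addr);	/* back to the linear map */

	free_pages((unsigned long)cpu_addr, get_order(size));
}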

arch/mips/mm/dma-noncoherent.c

@@ -44,33 +44,25 @@ static inline bool cpu_needs_post_dma_flush(struct device *dev)
 	}
 }
 
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	void *ret;
-
-	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-	if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
-		dma_cache_wback_inv((unsigned long) ret, size);
-		ret = (void *)UNCAC_ADDR(ret);
-	}
-
-	return ret;
+	dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
+void *uncached_kernel_address(void *addr)
 {
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+	return (void *)(__pa(addr) + UNCAC_BASE);
+}
+
+void *cached_kernel_address(void *addr)
+{
+	return __va(addr) - UNCAC_BASE;
 }
 
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr)
 {
-	unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
-
-	return page_to_pfn(virt_to_page((void *)addr));
+	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
 }
 
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,