sh: Fix up and optimize the kmap_coherent() interface.

This fixes up the kmap_coherent()/kunmap_coherent() interface for recent
changes both in the page fault path and the shared cache flushers, and
adds some optimizations.

The key change to note here is that the TLB flush is deferred until the
unmap, and the call into update_mmu_cache() goes away entirely, relying
on the regular page fault path to handle the lazy dcache writeback if
necessary.
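
As a rough illustration of the reworked pairing (a simplified sketch,
not the in-tree code: the fixmap slot selection and identifiers such as
kmap_coherent_pte and FIX_CMAP_END follow the SH implementation only
loosely, and guard checks are omitted):

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	/* Pick the fixmap slot matching the user address's dcache colour. */
	idx = FIX_CMAP_END -
	      ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
	vaddr = __fix_to_virt(idx);

	/* Install the PTE only; no update_mmu_cache(), no TLB flush here. */
	set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}

void kunmap_coherent(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = virt_to_fix(vaddr);

	/* The deferred TLB flush happens here, once, at unmap time. */
	pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
	local_flush_tlb_one(get_asid(), vaddr);

	pagefault_enable();
}

Callers consequently have to hand the kernel virtual address back at
unmap time, which is the interface change visible in the hunks below.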

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Author: Paul Mundt <lethal@linux-sh.org>
Date:   2009-09-03 17:21:10 +09:00
Parent: d1af119a69
Commit: 0906a3ad33
8 changed files with 61 additions and 47 deletions

@@ -51,7 +51,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
-		kunmap_coherent();
+		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -70,7 +70,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -89,7 +89,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		vfrom = kmap_atomic(from, KM_USER0);
 		copy_page(vto, vfrom);
@@ -150,7 +150,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 		kaddr = kmap_coherent(page, vmaddr);
 		__flush_wback_region((void *)kaddr, PAGE_SIZE);
-		kunmap_coherent();
+		kunmap_coherent(kaddr);
 	} else
 		__flush_wback_region((void *)addr, PAGE_SIZE);
 }