[PATCH] paravirt: kpte flush
Create a new PTE function which combines clearing a kernel PTE with the
subsequent flush.  This allows the two to be easily combined into a single
hypercall or paravirt-op.  More subtly, reverse the order of the flush for
kmap_atomic.  Instead of flushing on establishing a mapping, flush on
clearing a mapping.  This eliminates the possibility of leaving stale kmap
entries which may still have valid TLB mappings.  This is required for
direct mode hypervisors, which need to reprotect all mappings of a given
page when changing the page type from a normal page to a protected page
(such as a page table or descriptor table page).  But it also provides some
nicer semantics for real hardware, by providing extra debug-proofing
against using stale mappings, as well as ensuring that no stale mappings
exist when changing the cacheability attributes of a page, which could lead
to cache conflicts when two different types of mappings exist for the same
page.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 23002d88be (parent 25e4df5bae), committed by Linus Torvalds
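
The changelog's "single hypercall" point can be made concrete.  Below is a
minimal sketch, not part of this patch, of how a direct-mode hypervisor
backend might implement the combined operation.  xen_kpte_clear_flush() is a
hypothetical helper; HYPERVISOR_update_va_mapping() and UVMF_INVLPG are Xen's
real interface for updating a PTE and flushing its TLB entry in one trap.

#include <asm/hypervisor.h>	/* HYPERVISOR_update_va_mapping(), UVMF_INVLPG */
#include <asm/pgtable.h>

/* Hypothetical Xen-style backend: clear the PTE and flush the TLB entry
 * for vaddr in a single hypercall, instead of a PTE write plus an INVLPG. */
static inline void xen_kpte_clear_flush(pte_t *ptep, unsigned long vaddr)
{
	/* Xen locates and validates the PTE from vaddr; ptep is unused here. */
	HYPERVISOR_update_va_mapping(vaddr, __pte(0), UVMF_INVLPG);
}

On native hardware the same operation stays two steps (pte_clear plus
__flush_tlb_one), which is exactly what the new kpte_clear_flush() macro
below does; the point of combining them behind one name is that a paravirt
backend can later replace both with one transition.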
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
+#ifdef CONFIG_DEBUG_HIGHMEM
 	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
 		dec_preempt_count();
 		preempt_check_resched();
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		BUG();
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	__flush_tlb_one(vaddr);
 #endif
+
+	/*
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it. Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
+	 */
+	kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	dec_preempt_count();
 	preempt_check_resched();
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
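
With the hunks above applied, the TLB flush happens on teardown rather than
on setup.  A short usage sketch against the atomic-kmap API of this era
shows where the flush now sits; copy_to_highpage() is a hypothetical caller,
not part of this patch.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_to_highpage(struct page *page, const void *buf, size_t len)
{
	/* Establishes the fixmap PTE.  No TLB flush is needed here any
	 * more, because the slot was flushed when it was last unmapped. */
	void *vto = kmap_atomic(page, KM_USER0);

	memcpy(vto, buf, len);

	/* kunmap_atomic() now clears the PTE and flushes in one step via
	 * kpte_clear_flush(), so no stale translation of the page survives. */
	kunmap_atomic(vto, KM_USER0);
}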
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -441,6 +441,13 @@ extern pte_t *lookup_address(unsigned long address);
 #define pte_unmap_nested(pte) do { } while (0)
 #endif
 
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr)			\
+do {							\
+	pte_clear(&init_mm, vaddr, ptep);		\
+	__flush_tlb_one(vaddr);				\
+} while (0)
+
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
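
One side note on the macro's shape: the do { ... } while (0) wrapper is the
standard kernel idiom for multi-statement macros, keeping the expansion a
single C statement.  A minimal illustration, where maybe_clear() is a
hypothetical caller:

/* Safe even without braces: the macro expands to exactly one statement. */
static void maybe_clear(pte_t *ptep, unsigned long vaddr)
{
	if (pte_present(*ptep))
		kpte_clear_flush(ptep, vaddr);
}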