KVM: MMU: large page support
Create large page mappings if the guest PTEs are marked as such and the
underlying memory is hugetlbfs backed. If the large page contains
write-protected pages, a large pte is not used.

Gives a consistent 2% improvement for data copies on a RAM-mounted
filesystem, without NPT/EPT.

Anthony measures a 4% improvement on 4-way kernbench, with NPT.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
commit: 05da45583d
parent: 2e53d63acb
committed by: Avi Kivity
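The gating described in the commit message (use a large spte only when the whole large-page region is hugetlbfs backed on the host and contains no write-protected pages) boils down to aligning the faulting gfn down to a large-page boundary and then running the host-side checks on that range. The following is a minimal standalone sketch of that decision, not the kernel code: backed_by_hugetlbfs() and has_wrprotected_page() are hypothetical stand-ins for the checks the patch performs in is_largepage_backed(), and KVM_PAGES_PER_HPAGE is assumed to be 512 (2MB large pages over 4KB base pages).

/* Standalone illustration; compiles with any C compiler. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_PAGES_PER_HPAGE 512ULL	/* assumed: 2MB / 4KB */

typedef uint64_t gfn_t;

/* Hypothetical stand-ins for the host-side checks used by the patch. */
static bool backed_by_hugetlbfs(gfn_t large_gfn)
{
	(void)large_gfn;
	return true;	/* pretend the backing VMA is hugetlbfs */
}

static bool has_wrprotected_page(gfn_t large_gfn)
{
	(void)large_gfn;
	return false;	/* pretend no page in the range is write-protected */
}

/* Decide whether the fault at 'gfn' may be mapped with a single large pte. */
static bool want_largepage(gfn_t gfn, gfn_t *large_gfn)
{
	/* Align down to the first small frame covered by the large page. */
	*large_gfn = gfn & ~(KVM_PAGES_PER_HPAGE - 1);

	/* Large mapping only if hugetlbfs backed and nothing write-protected. */
	return backed_by_hugetlbfs(*large_gfn) &&
	       !has_wrprotected_page(*large_gfn);
}

int main(void)
{
	gfn_t gfn = 0x12345;	/* arbitrary faulting guest frame number */
	gfn_t large_gfn;

	if (want_largepage(gfn, &large_gfn))
		printf("gfn 0x%llx -> large pte starting at gfn 0x%llx\n",
		       (unsigned long long)gfn,
		       (unsigned long long)large_gfn);
	return 0;
}

When the check succeeds, the patch rewrites walker.gfn to the aligned large_gfn before calling gfn_to_page(), so a single directory-level spte can cover the whole region instead of one PTE-level spte per 4KB page.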
@@ -248,6 +248,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	pt_element_t gpte;
 	unsigned pte_access;
 	struct page *npage;
+	int largepage = vcpu->arch.update_pte.largepage;
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
@@ -264,7 +265,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	get_page(npage);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+		     npage);
 }
 
 /*
@@ -272,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int *ptwrite,
-			 struct page *page)
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, struct page *page)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -301,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
 			break;
-		if (is_shadow_present_pte(*shadow_ent)) {
+
+		if (largepage && level == PT_DIRECTORY_LEVEL)
+			break;
+
+		if (is_shadow_present_pte(*shadow_ent)
+		    && !is_large_pte(*shadow_ent)) {
 			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 			continue;
 		}
 
+		if (is_large_pte(*shadow_ent))
+			rmap_remove(vcpu->kvm, shadow_ent);
+
 		if (level - 1 == PT_PAGE_TABLE_LEVEL
 		    && walker->level == PT_DIRECTORY_LEVEL) {
 			metaphysical = 1;
@@ -339,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, walker->gfn, page);
+		     ptwrite, largepage, walker->gfn, page);
 
 	return shadow_ent;
 }
@@ -369,6 +379,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	int write_pt = 0;
 	int r;
 	struct page *page;
+	int largepage = 0;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 	kvm_mmu_audit(vcpu, "pre page fault");
@@ -396,6 +407,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	}
 
 	down_read(&current->mm->mmap_sem);
+	if (walker.level == PT_DIRECTORY_LEVEL) {
+		gfn_t large_gfn;
+		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+		if (is_largepage_backed(vcpu, large_gfn)) {
+			walker.gfn = large_gfn;
+			largepage = 1;
+		}
+	}
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 	up_read(&current->mm->mmap_sem);
 
@@ -410,7 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  &write_pt, page);
+				  largepage, &write_pt, page);
+
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte, write_pt);
 