KVM: MMU: large page support
Create large page mappings if the guest PTEs are marked as such and the
underlying memory is hugetlbfs backed. If the large page contains
write-protected pages, a large pte is not used.

Gives a consistent 2% improvement for data copies on ram mounted
filesystem, without NPT/EPT.

Anthony measures a 4% improvement on 4-way kernbench, with NPT.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
commit 05da45583d
parent 2e53d63acb
committed by Avi Kivity
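The core of the change is a gate in the shadow MMU's fault path: a large pte is installed only when the backing host memory is a hugetlbfs page and no page inside the large frame is write-protected. A minimal sketch of that gate, following the commit message's description; the helper names (is_largepage_backed, has_wrprotected_page) are approximations rather than the literal code, and has_wrprotected_page is assumed to read the write_count field added to struct kvm_memory_slot below:

/*
 * Sketch of the large-page gate added to the shadow MMU fault path.
 * Needs <linux/mm.h> for find_vma() and <linux/hugetlb.h> for
 * is_vm_hugetlb_page(); names are illustrative, not literal.
 */
static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	int backed = 0;

	/* any write-protected (shadowed) page in the frame vetoes it */
	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	/* gfn_to_hva() is exported for this purpose (second hunk below) */
	addr = gfn_to_hva(vcpu->kvm, large_gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	/* a large pte is only safe if the host backing is a huge page */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		backed = 1;
	up_read(&current->mm->mmap_sem);

	return backed;
}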
@@ -103,6 +103,10 @@ struct kvm_memory_slot {
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
+	struct {
+		unsigned long rmap_pde;
+		int write_count;
+	} *lpage_info;
 	unsigned long userspace_addr;
 	int user_alloc;
 };
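The new lpage_info array gives each memory slot one entry per potential large-page frame: rmap_pde chains the reverse mapping for the frame's large pte, and write_count counts write-protected pages inside the frame. A sketch of the indexing and accounting this implies, assuming the KVM_PAGES_PER_HPAGE constant of this era; helper names are illustrative:

/*
 * One lpage_info entry covers KVM_PAGES_PER_HPAGE small pages.
 * Sketch of the accounting around shadow page-table pages; helper
 * names approximate.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	/* index of gfn's large frame relative to the slot's first frame */
	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

/* a page in the frame became a shadowed, write-protected page table */
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	*slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)) += 1;
}

/* non-zero count means "do not map this frame with a large pte" */
static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	if (slot)
		return *slot_largepage_idx(gfn, slot);
	return 1;	/* no slot: be conservative */
}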
@@ -169,6 +173,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
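Since the first hunk puts lpage_info in struct kvm_memory_slot, the array presumably has to be sized and zeroed when a slot is registered, near the path named in the second hunk's header (kvm_arch_set_memory_region) or its generic caller. A hedged sketch of that setup; the helper and its placement are assumptions, but the edge handling follows from the struct's semantics: frames the slot only partially covers must never be mapped large, which can be forced by pre-setting their write_count:

/*
 * Sketch: sizing and zeroing a slot's lpage_info at creation time.
 * Assumed placement in the memslot setup path; names approximate.
 */
static int alloc_lpage_info(struct kvm_memory_slot *slot,
			    gfn_t base_gfn, unsigned long npages)
{
	unsigned long largepages = npages / KVM_PAGES_PER_HPAGE;

	if (npages % KVM_PAGES_PER_HPAGE)
		largepages++;		/* partial frame at the tail */
	if (base_gfn % KVM_PAGES_PER_HPAGE)
		largepages++;		/* slot starts mid-frame */

	slot->lpage_info = vmalloc(largepages * sizeof(*slot->lpage_info));
	if (!slot->lpage_info)
		return -ENOMEM;
	memset(slot->lpage_info, 0, largepages * sizeof(*slot->lpage_info));

	/* frames straddling the slot's edges must never be mapped large */
	if (base_gfn % KVM_PAGES_PER_HPAGE)
		slot->lpage_info[0].write_count = 1;
	if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE)
		slot->lpage_info[largepages - 1].write_count = 1;

	return 0;
}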