KVM: VMX: conditionally disable 2M pages
Disable usage of 2M pages if VMX_EPT_2MB_PAGE_BIT (bit 16) is clear in
MSR_IA32_VMX_EPT_VPID_CAP and EPT is enabled.

[avi: s/largepages_disabled/largepages_enabled/ to avoid negative logic]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 54dee9933e
parent 68f89400bc
committed by Avi Kivity
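For readers who want the capability check spelled out: the bit named in the commit message lives in the VMX EPT/VPID capability MSR. Below is a minimal sketch, assuming the standard Intel SDM definitions (MSR index 0x48c, bit 16); the in-tree helper cpu_has_vmx_ept_2m_page() is expected to test a copy of this MSR cached during VMX setup rather than re-reading it as done here.

#include <linux/types.h>   /* u64, bool */
#include <asm/msr.h>       /* rdmsrl() */

#define MSR_IA32_VMX_EPT_VPID_CAP  0x0000048c    /* VMX EPT/VPID capability MSR */
#define VMX_EPT_2MB_PAGE_BIT       (1ull << 16)  /* EPT supports 2MB pages */

/* Sketch: does the CPU advertise 2MB EPT pages? */
static inline bool cpu_has_vmx_ept_2m_page(void)
{
	u64 ept_vpid_cap;

	rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, ept_vpid_cap);
	return ept_vpid_cap & VMX_EPT_2MB_PAGE_BIT;
}

hardware_setup() in the first hunk below then calls the new kvm_disable_largepages() when EPT is enabled but this bit is clear, so the generic code stops mapping guest memory with large pages.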
arch/x86/kvm/vmx.c
@@ -1381,6 +1381,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_tpr_shadow())
 		kvm_x86_ops->update_cr8_intercept = NULL;
 
+	if (enable_ept && !cpu_has_vmx_ept_2m_page())
+		kvm_disable_largepages();
+
 	return alloc_kvm_area();
 }
 
include/linux/kvm_host.h
@@ -224,6 +224,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
virt/kvm/kvm_main.c
@@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 
 static bool kvm_rebooting;
 
+static bool largepages_enabled = true;
+
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
@@ -1174,9 +1176,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
-		 * other, disable large page support for this slot
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
 		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
+		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+		    !largepages_enabled)
 			for (i = 0; i < largepages; ++i)
 				new.lpage_info[i].write_count = 1;
 	}
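As an aside on the test above: XOR-ing the guest frame number with the host userspace page index and masking with KVM_PAGES_PER_HPAGE - 1 is non-zero exactly when the two start at different offsets inside a large page, which is why the new !largepages_enabled clause can simply be OR-ed into the same condition to write-protect every large page in the slot. A self-contained illustration of the alignment part, assuming the usual x86 value of 512 base pages per 2MB huge page (the constant below is a stand-in, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_HPAGE 512ULL   /* stand-in for KVM_PAGES_PER_HPAGE: 2MB / 4KB */

/* True if a slot starting at base_gfn (guest pages) and ugfn (host user
 * pages) keeps guest and host large-page boundaries in sync. */
static bool hpage_aligned(uint64_t base_gfn, uint64_t ugfn)
{
	return ((base_gfn ^ ugfn) & (PAGES_PER_HPAGE - 1)) == 0;
}

int main(void)
{
	printf("%d\n", hpage_aligned(0x200, 0x400));  /* 1: same offset inside a huge page */
	printf("%d\n", hpage_aligned(0x200, 0x401));  /* 0: offsets differ, large pages unusable */
	return 0;
}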
@@ -1291,6 +1295,12 @@ out:
 	return r;
 }
 
+void kvm_disable_largepages(void)
+{
+	largepages_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_largepages);
+
 int is_error_page(struct page *page)
 {
 	return page == bad_page;