KVM: x86: Inject #GP with the right rip on efer writes
This patch fixes a bug in the KVM EFER-MSR write path. If a guest writes to a reserved EFER bit, the set_efer function injects the #GP directly. The architecture-dependent wrmsr function does not see this, assumes success, and advances the rip. This results in a #GP in the guest with the wrong rip. This patch fixes this by reporting EFER write errors back to the architectural wrmsr function. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
committed by
Avi Kivity
parent
0d945bd935
commit
b69e8caef5
@@ -683,37 +683,29 @@ static u32 emulated_msrs[] = {
|
|||||||
MSR_IA32_MISC_ENABLE,
|
MSR_IA32_MISC_ENABLE,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||||
{
|
{
|
||||||
if (efer & efer_reserved_bits) {
|
if (efer & efer_reserved_bits)
|
||||||
kvm_inject_gp(vcpu, 0);
|
return 1;
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (is_paging(vcpu)
|
if (is_paging(vcpu)
|
||||||
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
|
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
|
||||||
kvm_inject_gp(vcpu, 0);
|
return 1;
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (efer & EFER_FFXSR) {
|
if (efer & EFER_FFXSR) {
|
||||||
struct kvm_cpuid_entry2 *feat;
|
struct kvm_cpuid_entry2 *feat;
|
||||||
|
|
||||||
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
|
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
|
||||||
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
|
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
|
||||||
kvm_inject_gp(vcpu, 0);
|
return 1;
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (efer & EFER_SVME) {
|
if (efer & EFER_SVME) {
|
||||||
struct kvm_cpuid_entry2 *feat;
|
struct kvm_cpuid_entry2 *feat;
|
||||||
|
|
||||||
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
|
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
|
||||||
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
|
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
|
||||||
kvm_inject_gp(vcpu, 0);
|
return 1;
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
kvm_x86_ops->set_efer(vcpu, efer);
|
kvm_x86_ops->set_efer(vcpu, efer);
|
||||||
@@ -725,6 +717,8 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
|||||||
|
|
||||||
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
|
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
|
||||||
kvm_mmu_reset_context(vcpu);
|
kvm_mmu_reset_context(vcpu);
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvm_enable_efer_bits(u64 mask)
|
void kvm_enable_efer_bits(u64 mask)
|
||||||
@@ -1153,8 +1147,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
|
|||||||
{
|
{
|
||||||
switch (msr) {
|
switch (msr) {
|
||||||
case MSR_EFER:
|
case MSR_EFER:
|
||||||
set_efer(vcpu, data);
|
return set_efer(vcpu, data);
|
||||||
break;
|
|
||||||
case MSR_K7_HWCR:
|
case MSR_K7_HWCR:
|
||||||
data &= ~(u64)0x40; /* ignore flush filter disable */
|
data &= ~(u64)0x40; /* ignore flush filter disable */
|
||||||
data &= ~(u64)0x100; /* ignore ignne emulation enable */
|
data &= ~(u64)0x100; /* ignore ignne emulation enable */
|
||||||
|
Reference in New Issue
Block a user