KVM: MMU: Reinstate pte prefetch on invlpg
Commit fb341f57 removed the pte prefetch on guest invlpg, citing guest races.
However, the SDM is adamant that prefetch is allowed:
"The processor may create entries in paging-structure caches for
translations required for prefetches and for accesses that are a
result of speculative execution that would never actually occur
in the executed code path."
And, in fact, there was a race in the prefetch code: we picked up the pte
without the mmu lock held, so an older invlpg could install the pte over
a newer invlpg.
Reinstate the prefetch logic, but this time use a counter to note whether
another invlpg has executed. If a race occurred, do not install the pte.
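
The pattern is an optimistic unlocked read validated by a generation
counter. A minimal standalone sketch of the idea (the helpers
read_guest_pte()/install_pte() are hypothetical stand-ins, not the KVM
code itself):

#include <pthread.h>
#include <stdatomic.h>

static atomic_uint invlpg_counter;	/* bumped by every guest invlpg */
static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helpers standing in for the real MMU work. */
static unsigned long read_guest_pte(unsigned long gpa) { return gpa | 1; }
static void install_pte(unsigned long gpa, unsigned long pte) { (void)gpa; (void)pte; }

/* Prefetch path: snapshot the counter, read the gpte without the lock,
 * then revalidate the snapshot under the lock before installing. */
static void pte_prefetch(unsigned long gpa)
{
	unsigned int gen = atomic_load(&invlpg_counter);	/* snapshot first */
	unsigned long gentry = read_guest_pte(gpa);		/* unlocked, may race */

	pthread_mutex_lock(&mmu_lock);
	if (atomic_load(&invlpg_counter) != gen)
		gentry = 0;		/* a newer invlpg ran: drop the stale pte */
	if (gentry)
		install_pte(gpa, gentry);
	pthread_mutex_unlock(&mmu_lock);
}

/* Invalidation path: bump the counter before the zap becomes visible,
 * so any in-flight unlocked read is flagged as potentially stale. */
static void invlpg(unsigned long gva)
{
	(void)gva;
	atomic_fetch_add(&invlpg_counter, 1);
	/* zap of the shadow pte would happen under mmu_lock here */
}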
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
@@ -2613,9 +2613,30 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int flooded = 0;
 	int npte;
 	int r;
+	int invlpg_counter;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
+	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+
+	/*
+	 * Assume that the pte write on a page table of the same type
+	 * as the current vcpu paging mode. This is nearly always true
+	 * (might be false while changing modes). Note it is verified later
+	 * by update_pte().
+	 */
+	if ((is_pae(vcpu) && bytes == 4) || !new) {
+		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+		if (is_pae(vcpu)) {
+			gpa &= ~(gpa_t)7;
+			bytes = 8;
+		}
+		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
+		if (r)
+			gentry = 0;
+		new = (const u8 *)&gentry;
+	}
+
 	switch (bytes) {
 	case 4:
 		gentry = *(const u32 *)new;
@@ -2628,22 +2649,10 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
-	/*
-	 * Assume that the pte write on a page table of the same type
-	 * as the current vcpu paging mode. This is nearly always true
-	 * (might be false while changing modes). Note it is verified later
-	 * by update_pte().
-	 */
-	if (is_pae(vcpu) && bytes == 4) {
-		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-		gpa &= ~(gpa_t)7;
-		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, 8);
-		if (r)
-			gentry = 0;
-	}
-
 	mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+		gentry = 0;
 	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
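
For context, the "|| !new" case in the first hunk serves the invlpg path:
the invlpg handler bumps the counter and then calls kvm_mmu_pte_write()
with a NULL buffer, which forces the gpte to be re-read from the guest
here. A simplified sketch of that caller (shadow-walk details condensed
and the exact placement of the increment approximate, not verbatim
paging_tmpl.h):

/* Simplified sketch of the invlpg side; the counter increment is what
 * the prefetch path in kvm_mmu_pte_write() revalidates. */
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t pte_gpa = -1;

	spin_lock(&vcpu->kvm->mmu_lock);
	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
	/* ... walk the shadow page table, zap the spte for gva,
	 *     and record the gpa of the guest pte backing it ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)	/* nothing to prefetch */
		return;

	/* NULL 'new' makes kvm_mmu_pte_write() read the gpte itself */
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}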