KVM: MMU: Use for_each_shadow_entry() in __direct_map()
Eliminating a callback and a useless structure.

Signed-off-by: Avi Kivity <avi@redhat.com>
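For context, __direct_map() now drives the page-table walk itself through the shadow-walk iterator added by the companion for_each_shadow_entry() patch. Below is a minimal sketch of that interface; the field names and helper macros are reproduced from memory of arch/x86/kvm/mmu.c of that era and are assumptions for illustration, not part of this diff.

/*
 * Sketch of the shadow-walk iterator interface this patch relies on.
 * The real definitions live in arch/x86/kvm/mmu.c; names and fields
 * here may differ slightly from the actual source.
 */
struct kvm_shadow_walk_iterator {
	u64 addr;		/* guest (or pseudo-guest) address being mapped */
	hpa_t shadow_addr;	/* host-physical address of the current shadow page table */
	int level;		/* current paging level, e.g. PT_PAGE_TABLE_LEVEL */
	u64 *sptep;		/* shadow pte for addr at this level */
	unsigned index;		/* index of sptep within its page table */
};

/*
 * Walks the shadow page tables from the root towards the leaf,
 * yielding one spte per level; the caller breaks out of the loop
 * once the final mapping has been installed.
 */
#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

Each iteration hands __direct_map() the spte for the faulting address at one paging level, so it can allocate intermediate shadow pages on the way down and break as soon as the leaf (or large-page) spte is set, without the callback indirection of walk_shadow().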
@@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-struct direct_shadow_walk {
-	struct kvm_shadow_walk walker;
-	pfn_t pfn;
-	int write;
-	int largepage;
-	int pt_write;
-};
-
-static int direct_map_entry(struct kvm_shadow_walk *_walk,
-			    struct kvm_vcpu *vcpu,
-			    u64 addr, u64 *sptep, int level)
-{
-	struct direct_shadow_walk *walk =
-		container_of(_walk, struct direct_shadow_walk, walker);
-	struct kvm_mmu_page *sp;
-	gfn_t pseudo_gfn;
-	gfn_t gfn = addr >> PAGE_SHIFT;
-
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
-			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, 0, gfn, walk->pfn, false);
-		++vcpu->stat.pf_fixed;
-		return 1;
-	}
-
-	if (*sptep == shadow_trap_nonpresent_pte) {
-		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
-				      1, ACC_ALL, sptep);
-		if (!sp) {
-			pgprintk("nonpaging_map: ENOMEM\n");
-			kvm_release_pfn_clean(walk->pfn);
-			return -ENOMEM;
-		}
-
-		set_shadow_pte(sptep,
-			       __pa(sp->spt)
-			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-			       | shadow_user_mask | shadow_x_mask);
-	}
-	return 0;
-}
-
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			int largepage, gfn_t gfn, pfn_t pfn)
 {
-	int r;
-	struct direct_shadow_walk walker = {
-		.walker = { .entry = direct_map_entry, },
-		.pfn = pfn,
-		.largepage = largepage,
-		.write = write,
-		.pt_write = 0,
-	};
-
-	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
-	if (r < 0)
-		return r;
-	return walker.pt_write;
+	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
+	int pt_write = 0;
+	gfn_t pseudo_gfn;
+
+	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+		if (iterator.level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write,
+				     largepage, 0, gfn, pfn, false);
+			++vcpu->stat.pf_fixed;
+			break;
+		}
+
+		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
+					      iterator.level - 1,
+					      1, ACC_ALL, iterator.sptep);
+			if (!sp) {
+				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_pfn_clean(pfn);
+				return -ENOMEM;
+			}
+
+			set_shadow_pte(iterator.sptep,
+				       __pa(sp->spt)
+				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				       | shadow_user_mask | shadow_x_mask);
+		}
+	}
+	return pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)