Commit 9f652d21 authored by Avi Kivity

KVM: MMU: Use for_each_shadow_entry() in __direct_map()

Eliminating a callback and a useless structure.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 2d11123a
...@@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) ...@@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{ {
} }
struct direct_shadow_walk { static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
struct kvm_shadow_walk walker; int largepage, gfn_t gfn, pfn_t pfn)
pfn_t pfn;
int write;
int largepage;
int pt_write;
};
static int direct_map_entry(struct kvm_shadow_walk *_walk,
struct kvm_vcpu *vcpu,
u64 addr, u64 *sptep, int level)
{ {
struct direct_shadow_walk *walk = struct kvm_shadow_walk_iterator iterator;
container_of(_walk, struct direct_shadow_walk, walker);
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
int pt_write = 0;
gfn_t pseudo_gfn; gfn_t pseudo_gfn;
gfn_t gfn = addr >> PAGE_SHIFT;
if (level == PT_PAGE_TABLE_LEVEL
|| (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
0, walk->write, 1, &walk->pt_write,
walk->largepage, 0, gfn, walk->pfn, false);
++vcpu->stat.pf_fixed;
return 1;
}
if (*sptep == shadow_trap_nonpresent_pte) { for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; if (iterator.level == PT_PAGE_TABLE_LEVEL
sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1, || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1, ACC_ALL, sptep); mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
if (!sp) { 0, write, 1, &pt_write,
pgprintk("nonpaging_map: ENOMEM\n"); largepage, 0, gfn, pfn, false);
kvm_release_pfn_clean(walk->pfn); ++vcpu->stat.pf_fixed;
return -ENOMEM; break;
} }
set_shadow_pte(sptep, if (*iterator.sptep == shadow_trap_nonpresent_pte) {
__pa(sp->spt) pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
| PT_PRESENT_MASK | PT_WRITABLE_MASK sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
| shadow_user_mask | shadow_x_mask); iterator.level - 1,
} 1, ACC_ALL, iterator.sptep);
return 0; if (!sp) {
} pgprintk("nonpaging_map: ENOMEM\n");
kvm_release_pfn_clean(pfn);
return -ENOMEM;
}
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, set_shadow_pte(iterator.sptep,
int largepage, gfn_t gfn, pfn_t pfn) __pa(sp->spt)
{ | PT_PRESENT_MASK | PT_WRITABLE_MASK
int r; | shadow_user_mask | shadow_x_mask);
struct direct_shadow_walk walker = { }
.walker = { .entry = direct_map_entry, }, }
.pfn = pfn, return pt_write;
.largepage = largepage,
.write = write,
.pt_write = 0,
};
r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
if (r < 0)
return r;
return walker.pt_write;
} }
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment