Commit 9f652d21 authored by Avi Kivity

KVM: MMU: Use for_each_shadow_entry() in __direct_map()

Eliminating a callback and a useless structure.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 2d11123a
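
The new loop relies on the shadow-walk iterator introduced by the parent commit (2d11123a). For reference, here is a minimal sketch of that iterator's shape, assuming it matches the parent commit; this is a reconstruction for context, not a verbatim quote of the header:

struct kvm_shadow_walk_iterator {
	u64 addr;		/* address being mapped, derived from the gfn */
	hpa_t shadow_addr;	/* physical address of the current shadow page table */
	int level;		/* current paging level */
	u64 *sptep;		/* spte for addr at this level */
	unsigned index;		/* index of sptep within its page table */
};

/*
 * Walk the shadow page tables for addr, yielding one spte per level
 * so the loop body can inspect or fill iterator.sptep.
 */
#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

Compared with walk_shadow() plus the direct_map_entry() callback, the open-coded loop keeps pfn, write, largepage and pt_write in ordinary locals, so the direct_shadow_walk wrapper struct and the container_of() indirection are no longer needed.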
@@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-struct direct_shadow_walk {
-	struct kvm_shadow_walk walker;
-	pfn_t pfn;
-	int write;
-	int largepage;
-	int pt_write;
-};
-
-static int direct_map_entry(struct kvm_shadow_walk *_walk,
-			    struct kvm_vcpu *vcpu,
-			    u64 addr, u64 *sptep, int level)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+			int largepage, gfn_t gfn, pfn_t pfn)
 {
-	struct direct_shadow_walk *walk =
-		container_of(_walk, struct direct_shadow_walk, walker);
+	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
+	int pt_write = 0;
 	gfn_t pseudo_gfn;
-	gfn_t gfn = addr >> PAGE_SHIFT;
 
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
-			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, 0, gfn, walk->pfn, false);
-		++vcpu->stat.pf_fixed;
-		return 1;
-	}
+	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+		if (iterator.level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write,
+				     largepage, 0, gfn, pfn, false);
+			++vcpu->stat.pf_fixed;
+			break;
+		}
 
-	if (*sptep == shadow_trap_nonpresent_pte) {
-		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
-				      1, ACC_ALL, sptep);
-		if (!sp) {
-			pgprintk("nonpaging_map: ENOMEM\n");
-			kvm_release_pfn_clean(walk->pfn);
-			return -ENOMEM;
+		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
+					      iterator.level - 1,
+					      1, ACC_ALL, iterator.sptep);
+			if (!sp) {
+				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_pfn_clean(pfn);
+				return -ENOMEM;
+			}
+
+			set_shadow_pte(iterator.sptep,
+				       __pa(sp->spt)
+				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				       | shadow_user_mask | shadow_x_mask);
 		}
-
-		set_shadow_pte(sptep,
-			       __pa(sp->spt)
-			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-			       | shadow_user_mask | shadow_x_mask);
 	}
-	return 0;
-}
-
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn)
-{
-	int r;
-	struct direct_shadow_walk walker = {
-		.walker = { .entry = direct_map_entry, },
-		.pfn = pfn,
-		.largepage = largepage,
-		.write = write,
-		.pt_write = 0,
-	};
-
-	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
-	if (r < 0)
-		return r;
-	return walker.pt_write;
+	return pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)