Commit 140754bc authored by Avi Kivity's avatar Avi Kivity

KVM: MMU: Convert direct maps to use the generic shadow walker

Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 3d000db5
@@ -1260,49 +1260,66 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{ {
} }
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, struct direct_shadow_walk {
int largepage, gfn_t gfn, pfn_t pfn) struct kvm_shadow_walk walker;
{ pfn_t pfn;
hpa_t table_addr = vcpu->arch.mmu.root_hpa; int write;
int pt_write = 0; int largepage;
int level = vcpu->arch.mmu.shadow_root_level; int pt_write;
};
for (; ; level--) {
u32 index = PT64_INDEX(v, level);
u64 *table;
ASSERT(VALID_PAGE(table_addr)); static int direct_map_entry(struct kvm_shadow_walk *_walk,
table = __va(table_addr); struct kvm_vcpu *vcpu,
gva_t addr, u64 *sptep, int level)
{
struct direct_shadow_walk *walk =
container_of(_walk, struct direct_shadow_walk, walker);
struct kvm_mmu_page *sp;
gfn_t pseudo_gfn;
gfn_t gfn = addr >> PAGE_SHIFT;
if (level == PT_PAGE_TABLE_LEVEL
|| (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
0, walk->write, 1, &walk->pt_write,
walk->largepage, gfn, walk->pfn, false);
return 1;
}
if (level == 1 || (largepage && level == 2)) { if (*sptep == shadow_trap_nonpresent_pte) {
mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
0, write, 1, &pt_write, largepage, sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
gfn, pfn, false); 1, ACC_ALL, sptep);
return pt_write; if (!sp) {
pgprintk("nonpaging_map: ENOMEM\n");
kvm_release_pfn_clean(walk->pfn);
return -ENOMEM;
} }
if (table[index] == shadow_trap_nonpresent_pte) { set_shadow_pte(sptep,
struct kvm_mmu_page *new_table; __pa(sp->spt)
gfn_t pseudo_gfn; | PT_PRESENT_MASK | PT_WRITABLE_MASK
| shadow_user_mask | shadow_x_mask);
pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
>> PAGE_SHIFT;
new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
v, level - 1,
1, ACC_ALL, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
kvm_release_pfn_clean(pfn);
return -ENOMEM;
}
set_shadow_pte(&table[index],
__pa(new_table->spt)
| PT_PRESENT_MASK | PT_WRITABLE_MASK
| shadow_user_mask | shadow_x_mask);
}
table_addr = table[index] & PT64_BASE_ADDR_MASK;
} }
return 0;
}
/*
 * Map gfn -> pfn in the shadow page tables for a vcpu with no guest
 * paging (direct map), by driving the generic shadow walker with
 * direct_map_entry() as the per-level callback.
 *
 * Returns a negative errno from the walk on failure, otherwise the
 * pt_write flag accumulated by mmu_set_spte() at the leaf.
 */
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int largepage, gfn_t gfn, pfn_t pfn)
{
	int r;
	struct direct_shadow_walk walker = {
		.walker = { .entry = direct_map_entry, },
		.pfn = pfn,
		.largepage = largepage,
		.write = write,
		.pt_write = 0,
	};

	r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
	if (r < 0)
		return r;
	return walker.pt_write;
}
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment