Commit f7d9c7b7 authored by Avi Kivity

KVM: MMU: Fix race when instantiating a shadow pte

For improved concurrency, the guest walk is performed concurrently with other
vcpus.  This means that we need to revalidate the guest ptes once we have
write-protected the guest page tables, at which point they can no longer be
modified.

The current code attempts to avoid this check if the shadow page table is not
new, on the assumption that if it has existed before, the guest could not have
modified the pte without the shadow lock.  However, the assumption is incorrect,
as the racing vcpu could have modified the pte, then instantiated the shadow
page, before our vcpu regains control:

  vcpu0        vcpu1

  fault
  walk pte

               modify pte
               fault in same pagetable
               instantiate shadow page

  lookup shadow page
  conclude it is old
  instantiate spte based on stale guest pte

We could do something clever with generation counters, but a test run by
Marcelo suggests this is unnecessary and we can just do the revalidation
unconditionally.  The pte will be in the processor cache and the check can
be quite fast.
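
Concretely, the fix makes FNAME(fetch) always re-read the guest pte with
kvm_read_guest_atomic() after kvm_mmu_get_page() has write-protected the guest
page table, and bail out if it no longer matches what the walker saw.  A minimal
sketch of that pattern follows; the walker field names pte_gpa[] and ptes[] and
the abbreviated error handling are assumptions based on the surrounding code,
not an exact copy of the final function:

  shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
                                 metaphysical, access, shadow_ent);
  if (!metaphysical) {
          pt_element_t curr_pte;
          int r;

          /* Re-read the guest pte now that its page table is write-protected. */
          r = kvm_read_guest_atomic(vcpu->kvm, walker->pte_gpa[level - 2],
                                    &curr_pte, sizeof(curr_pte));
          if (r || curr_pte != walker->ptes[level - 2])
                  return NULL;  /* stale guest pte: drop this fault and retry */
  }
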
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 8c35f237
arch/x86/kvm/mmu.c:

@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     unsigned level,
 					     int metaphysical,
 					     unsigned access,
-					     u64 *parent_pte,
-					     bool *new_page)
+					     u64 *parent_pte)
 {
 	union kvm_mmu_page_role role;
 	unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	if (new_page)
-		*new_page = 1;
 	return sp;
 }
 
@@ -1006,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
 				     >> PAGE_SHIFT;
 			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
 						     v, level - 1,
-						     1, ACC_ALL, &table[index],
-						     NULL);
+						     1, ACC_ALL, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
 				kvm_release_page_clean(page);
@@ -1100,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
@@ -1121,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
-				      ACC_ALL, NULL, NULL);
+				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;

arch/x86/kvm/paging_tmpl.h:

@@ -300,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		u64 shadow_pte;
 		int metaphysical;
 		gfn_t table_gfn;
-		bool new_page = 0;
 
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
@@ -322,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, access,
-					       shadow_ent, &new_page);
-		if (new_page && !metaphysical) {
+					       shadow_ent);
+		if (!metaphysical) {
 			int r;
 			pt_element_t curr_pte;
 			r = kvm_read_guest_atomic(vcpu->kvm,