Commit 37f6a4e2 authored by Marcelo Tosatti, committed by Paolo Bonzini

KVM: x86: handle invalid root_hpa everywhere

Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to
the bug fixed by commit 989c6b34.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent ab53f22e
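For context: commit 989c6b34 fixed a race in which vcpu->arch.mmu.root_hpa becomes INVALID_PAGE (the shadow roots are freed, e.g. by kvm_mmu_unload) while another path is about to traverse the shadow page tables. This commit applies the same guard to the remaining walkers. A minimal sketch of the pattern follows; example_lockless_walk() is a hypothetical function for illustration only and is not part of the patch:

	/*
	 * Illustrative only: example_lockless_walk() is hypothetical.
	 * It shows the guard this commit adds to each walker: check
	 * root_hpa before touching the shadow page tables.
	 */
	static u64 example_lockless_walk(struct kvm_vcpu *vcpu, u64 addr)
	{
		u64 spte = 0ull;

		/* Roots may have been freed concurrently; do not walk. */
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return spte;

		walk_shadow_page_lockless_begin(vcpu);
		/* ... inspect sptes for addr here ... */
		walk_shadow_page_lockless_end(vcpu);
		return spte;
	}

FNAME(invlpg) additionally does WARN_ON(1) before returning, presumably because reaching invlpg emulation without a valid root is unexpected rather than a benign race.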
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2832,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	bool ret = false;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return false;
+
 	if (!page_fault_can_be_fast(error_code))
 		return false;
 
@@ -3227,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte = 0ull;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return spte;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 		if (!is_shadow_present_pte(spte))
@@ -4513,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 	u64 spte;
 	int nr_sptes = 0;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return nr_sptes;
+
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 		sptes[iterator.level-1] = spte;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
 		goto out_gpte_changed;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		goto out_gpte_changed;
+
 	for (shadow_walk_init(&it, vcpu, addr);
 	     shadow_walk_okay(&it) && it.level > gw->level;
 	     shadow_walk_next(&it)) {
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	 */
 	mmu_topup_memory_caches(vcpu);
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+		WARN_ON(1);
+		return;
+	}
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;