Commit 67369273 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Use KVM_BUG/KVM_BUG_ON to handle bugs that are fatal to the VM

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <0e8760a26151f47dc47052b25ca8b84fffe0641e.1625186503.git.isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7ee3e8c3
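For context, the hunks below all follow the same call pattern: KVM_BUG()/KVM_BUG_ON() evaluate to true when the asserted condition fires and mark the VM as bugged, so an exit handler can bail out with -EIO instead of limping along, and the vcpu_enter_guest() hunk turns the resulting KVM_REQ_VM_BUGGED request into an -EIO exit to userspace. A minimal sketch of that pattern follows; handle_example_exit() and its feature_enabled parameter are hypothetical and not part of this commit, the real call sites are the VMX/SVM handlers changed below.

#include <linux/kvm_host.h>	/* KVM_BUG_ON(), struct kvm_vcpu (assumed header) */

/*
 * Sketch only: illustrates the KVM_BUG_ON() usage pattern applied in this
 * patch.  When the condition fires, the macro warns, marks vcpu->kvm as
 * bugged, and evaluates to true, so the handler can fail with -EIO.
 */
static int handle_example_exit(struct kvm_vcpu *vcpu, bool feature_enabled)
{
	if (KVM_BUG_ON(!feature_enabled, vcpu->kvm))
		return -EIO;	/* propagated out of the vcpu run loop */

	return 1;		/* returning 1 resumes the guest as usual */
}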
@@ -1560,7 +1560,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		KVM_BUG_ON(1, vcpu->kvm);
 	}
 }
@@ -2274,7 +2274,7 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		KVM_BUG_ON(1, vcpu->kvm);
 		break;
 	}
 }
@@ -4996,6 +4996,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 			return kvm_complete_insn_gp(vcpu, err);
 		case 3:
 			WARN_ON_ONCE(enable_unrestricted_guest);
+
 			err = kvm_set_cr3(vcpu, val);
 			return kvm_complete_insn_gp(vcpu, err);
 		case 4:
@@ -5021,14 +5022,13 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		}
 		break;
 	case 2: /* clts */
-		WARN_ONCE(1, "Guest should always own CR0.TS");
-		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
-		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-		return kvm_skip_emulated_instruction(vcpu);
+		KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
+		return -EIO;
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
 			WARN_ON_ONCE(enable_unrestricted_guest);
+
 			val = kvm_read_cr3(vcpu);
 			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
@@ -5338,7 +5338,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
-	WARN_ON_ONCE(!enable_vnmi);
+	if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
+		return -EIO;
+
 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
 	++vcpu->stat.nmi_window_exits;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -5896,7 +5898,8 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	 * below) should never happen as that means we incorrectly allowed a
 	 * nested VM-Enter with an invalid vmcs12.
 	 */
-	WARN_ON_ONCE(vmx->nested.nested_run_pending);
+	if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
+		return -EIO;
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required)
@@ -6274,7 +6277,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	int max_irr;
 	bool max_irr_updated;
 
-	WARN_ON(!vcpu->arch.apicv_active);
+	if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+		return -EIO;
+
 	if (pi_test_on(&vmx->pi_desc)) {
 		pi_clear_on(&vmx->pi_desc);
 		/*
@@ -6357,7 +6362,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
 	gate_desc *desc = (gate_desc *)host_idt_base + vector;
 
-	if (WARN_ONCE(!is_external_intr(intr_info),
+	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
@@ -9395,6 +9395,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_request_pending(vcpu)) {
+		if (kvm_check_request(KVM_REQ_VM_BUGGED, vcpu)) {
+			r = -EIO;
+			goto out;
+		}
 		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
 			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
 				r = 0;