Commit 996ff542 authored by Paolo Bonzini

KVM: x86: move kvm_inject_gp up from kvm_set_dr to callers

Push the injection of #GP up to the callers, so that they can just use
kvm_complete_insn_gp. __kvm_set_dr is pretty much what the callers can use
together with kvm_complete_insn_gp, so rename it to kvm_set_dr and drop
the old kvm_set_dr wrapper.

This also allows nested VMX code, which really wanted to use __kvm_set_dr,
to use the right function.
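
For context, the helper the callers switch to, kvm_complete_insn_gp, roughly follows the sketch below (it already exists in x86.c and is not part of this patch): a non-zero err injects #GP, zero skips the emulated instruction.

  int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
  {
          if (err)
                  kvm_inject_gp(vcpu, 0);	/* fault: do not advance RIP */
          else
                  return kvm_skip_emulated_instruction(vcpu);
          return 1;
  }

This is why kvm_set_dr only needs to report success or failure; the caller decides how to complete the instruction.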

While at it, remove the kvm_require_dr() check from the SVM interception.
The APM states:

  All normal exception checks take precedence over the SVM intercepts.

which includes the CR4.DE=1 #UD.
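
The dropped SVM-side check corresponds roughly to kvm_require_dr, sketched below (pre-existing code, not part of this diff); it queues #UD for DR4/DR5 accesses when CR4.DE=1, which the hardware already checks before the intercept fires. The VMX handler keeps its call because VMX does not make the same guarantee.

  bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
  {
          /* DR4/DR5 alias DR6/DR7 only when CR4.DE is clear. */
          if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                  return true;

          kvm_queue_exception(vcpu, UD_VECTOR);
          return false;
  }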
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 29d6ca41
arch/x86/kvm/svm/svm.c

@@ -2617,6 +2617,7 @@ static int dr_interception(struct vcpu_svm *svm)
 {
 	int reg, dr;
 	unsigned long val;
+	int err = 0;
 
 	if (svm->vcpu.guest_debug == 0) {
 		/*
@@ -2634,20 +2635,16 @@ static int dr_interception(struct vcpu_svm *svm)
 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
 
 	if (dr >= 16) { /* mov to DRn */
-		if (!kvm_require_dr(&svm->vcpu, dr - 16))
-			return 1;
+		dr -= 16;
 		val = kvm_register_read(&svm->vcpu, reg);
-		kvm_set_dr(&svm->vcpu, dr - 16, val);
+		err = kvm_set_dr(&svm->vcpu, dr, val);
 	} else {
-		if (!kvm_require_dr(&svm->vcpu, dr))
-			return 1;
-
 		kvm_get_dr(&svm->vcpu, dr, &val);
 		kvm_register_write(&svm->vcpu, reg, val);
 	}
 
-	return kvm_skip_emulated_instruction(&svm->vcpu);
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
arch/x86/kvm/vmx/vmx.c

@@ -5095,6 +5095,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	int dr, dr7, reg;
+	int err = 1;
 
 	exit_qualification = vmx_get_exit_qual(vcpu);
 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
@@ -5103,9 +5104,9 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	if (!kvm_require_dr(vcpu, dr))
 		return 1;
 
-	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
-	if (!kvm_require_cpl(vcpu, 0))
-		return 1;
+	if (kvm_x86_ops.get_cpl(vcpu) > 0)
+		goto out;
+
 	dr7 = vmcs_readl(GUEST_DR7);
 	if (dr7 & DR7_GD) {
 		/*
@@ -5144,11 +5145,13 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 
 		kvm_get_dr(vcpu, dr, &val);
 		kvm_register_write(vcpu, reg, val);
-	} else
-		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
-			return 1;
+		err = 0;
+	} else {
+		err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+	}
 
-	return kvm_skip_emulated_instruction(vcpu);
+out:
+	return kvm_complete_insn_gp(vcpu, err);
 }
 
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c

@@ -1143,7 +1143,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 	return fixed;
 }
 
-static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
 	size_t size = ARRAY_SIZE(vcpu->arch.db);
 
@@ -1156,13 +1156,13 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 	case 4:
 	case 6:
 		if (!kvm_dr6_valid(val))
-			return -1; /* #GP */
+			return 1; /* #GP */
 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
 		break;
 	case 5:
 	default: /* 7 */
 		if (!kvm_dr7_valid(val))
-			return -1; /* #GP */
+			return 1; /* #GP */
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
 		kvm_update_dr7(vcpu);
 		break;
@@ -1170,15 +1170,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 
 	return 0;
 }
-
-int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
-{
-	if (__kvm_set_dr(vcpu, dr, val)) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
-	return 0;
-}
 EXPORT_SYMBOL_GPL(kvm_set_dr);
 
 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
@@ -6619,7 +6610,7 @@ static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 			   unsigned long value)
 {
-	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
+	return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -8664,7 +8655,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	dt.address = dt.size = 0;
 	static_call(kvm_x86_set_idt)(vcpu, &dt);
 
-	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+	kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
 	cs.base = vcpu->arch.smbase;