Commit 16f8a6f9 authored by Nadav Amit, committed by Paolo Bonzini

KVM: vmx: Unavailable DR4/5 is checked before CPL

If DR4/5 is accessed when it is unavailable (since CR4.DE is set), then #UD
should be generated even if CPL>0. This is according to Intel SDM Table 6-2:
"Priority Among Simultaneous Exceptions and Interrupts".

Note that this may happen on the first DR access, even if the host does not
set debug breakpoints. Obviously, it occurs when the host debugs the guest.

This patch moves the DR4/5 checks from __kvm_set_dr/_kvm_get_dr to handle_dr.
The emulator already checks DR4/5 availability in check_dr_read. Nested
virtualization related calls to kvm_set_dr/kvm_get_dr should not inject
exceptions into the guest.

As for SVM, the patch follows the previous logic as much as possible. Anyhow,
it appears the DR interception code might be buggy - even if the DR access
may cause an exception, the instruction is skipped.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c49c759f
...@@ -895,6 +895,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, ...@@ -895,6 +895,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t gfn, void *data, int offset, int len, gfn_t gfn, void *data, int offset, int len,
u32 access); u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
static inline int __kvm_irq_line_state(unsigned long *irq_state, static inline int __kvm_irq_line_state(unsigned long *irq_state,
int irq_source_id, int level) int irq_source_id, int level)
......
...@@ -2999,7 +2999,6 @@ static int dr_interception(struct vcpu_svm *svm) ...@@ -2999,7 +2999,6 @@ static int dr_interception(struct vcpu_svm *svm)
{ {
int reg, dr; int reg, dr;
unsigned long val; unsigned long val;
int err;
if (svm->vcpu.guest_debug == 0) { if (svm->vcpu.guest_debug == 0) {
/* /*
...@@ -3019,12 +3018,15 @@ static int dr_interception(struct vcpu_svm *svm) ...@@ -3019,12 +3018,15 @@ static int dr_interception(struct vcpu_svm *svm)
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */ if (dr >= 16) { /* mov to DRn */
if (!kvm_require_dr(&svm->vcpu, dr - 16))
return 1;
val = kvm_register_read(&svm->vcpu, reg); val = kvm_register_read(&svm->vcpu, reg);
kvm_set_dr(&svm->vcpu, dr - 16, val); kvm_set_dr(&svm->vcpu, dr - 16, val);
} else { } else {
err = kvm_get_dr(&svm->vcpu, dr, &val); if (!kvm_require_dr(&svm->vcpu, dr))
if (!err) return 1;
kvm_register_write(&svm->vcpu, reg, val); kvm_get_dr(&svm->vcpu, dr, &val);
kvm_register_write(&svm->vcpu, reg, val);
} }
skip_emulated_instruction(&svm->vcpu); skip_emulated_instruction(&svm->vcpu);
......
...@@ -5163,13 +5163,20 @@ static int handle_cr(struct kvm_vcpu *vcpu) ...@@ -5163,13 +5163,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
static int handle_dr(struct kvm_vcpu *vcpu) static int handle_dr(struct kvm_vcpu *vcpu)
{ {
unsigned long exit_qualification; unsigned long exit_qualification;
int dr, reg; int dr, dr7, reg;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
/* First, if DR does not exist, trigger UD */
if (!kvm_require_dr(vcpu, dr))
return 1;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */ /* Do not handle if the CPL > 0, will trigger GP on re-entry */
if (!kvm_require_cpl(vcpu, 0)) if (!kvm_require_cpl(vcpu, 0))
return 1; return 1;
dr = vmcs_readl(GUEST_DR7); dr7 = vmcs_readl(GUEST_DR7);
if (dr & DR7_GD) { if (dr7 & DR7_GD) {
/* /*
* As the vm-exit takes precedence over the debug trap, we * As the vm-exit takes precedence over the debug trap, we
* need to emulate the latter, either for the host or the * need to emulate the latter, either for the host or the
...@@ -5177,7 +5184,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) ...@@ -5177,7 +5184,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
*/ */
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
vcpu->run->debug.arch.dr7 = dr; vcpu->run->debug.arch.dr7 = dr7;
vcpu->run->debug.arch.pc = vcpu->run->debug.arch.pc =
vmcs_readl(GUEST_CS_BASE) + vmcs_readl(GUEST_CS_BASE) +
vmcs_readl(GUEST_RIP); vmcs_readl(GUEST_RIP);
...@@ -5207,8 +5214,6 @@ static int handle_dr(struct kvm_vcpu *vcpu) ...@@ -5207,8 +5214,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
return 1; return 1;
} }
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification); reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) { if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val; unsigned long val;
......
...@@ -455,6 +455,16 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) ...@@ -455,6 +455,16 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
} }
EXPORT_SYMBOL_GPL(kvm_require_cpl); EXPORT_SYMBOL_GPL(kvm_require_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return true;
kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);
/* /*
* This function will be used to read from the physical memory of the currently * This function will be used to read from the physical memory of the currently
* running guest. The difference to kvm_read_guest_page is that this function * running guest. The difference to kvm_read_guest_page is that this function
...@@ -811,8 +821,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) ...@@ -811,8 +821,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu->arch.eff_db[dr] = val; vcpu->arch.eff_db[dr] = val;
break; break;
case 4: case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */ /* fall through */
case 6: case 6:
if (val & 0xffffffff00000000ULL) if (val & 0xffffffff00000000ULL)
...@@ -821,8 +829,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) ...@@ -821,8 +829,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
kvm_update_dr6(vcpu); kvm_update_dr6(vcpu);
break; break;
case 5: case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */ /* fall through */
default: /* 7 */ default: /* 7 */
if (val & 0xffffffff00000000ULL) if (val & 0xffffffff00000000ULL)
...@@ -837,27 +843,21 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) ...@@ -837,27 +843,21 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{ {
int res; if (__kvm_set_dr(vcpu, dr, val)) {
res = __kvm_set_dr(vcpu, dr, val);
if (res > 0)
kvm_queue_exception(vcpu, UD_VECTOR);
else if (res < 0)
kvm_inject_gp(vcpu, 0); kvm_inject_gp(vcpu, 0);
return 1;
return res; }
return 0;
} }
EXPORT_SYMBOL_GPL(kvm_set_dr); EXPORT_SYMBOL_GPL(kvm_set_dr);
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{ {
switch (dr) { switch (dr) {
case 0 ... 3: case 0 ... 3:
*val = vcpu->arch.db[dr]; *val = vcpu->arch.db[dr];
break; break;
case 4: case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */ /* fall through */
case 6: case 6:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
...@@ -866,23 +866,11 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) ...@@ -866,23 +866,11 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = kvm_x86_ops->get_dr6(vcpu); *val = kvm_x86_ops->get_dr6(vcpu);
break; break;
case 5: case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */ /* fall through */
default: /* 7 */ default: /* 7 */
*val = vcpu->arch.dr7; *val = vcpu->arch.dr7;
break; break;
} }
return 0;
}
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
if (_kvm_get_dr(vcpu, dr, val)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(kvm_get_dr); EXPORT_SYMBOL_GPL(kvm_get_dr);
...@@ -3106,7 +3094,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, ...@@ -3106,7 +3094,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
unsigned long val; unsigned long val;
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
_kvm_get_dr(vcpu, 6, &val); kvm_get_dr(vcpu, 6, &val);
dbgregs->dr6 = val; dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7; dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0; dbgregs->flags = 0;
...@@ -4667,7 +4655,7 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) ...@@ -4667,7 +4655,7 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{ {
return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
} }
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment