Commit f10c729f authored by Jim Mattson, committed by Paolo Bonzini

kvm: vmx: Defer setting of DR6 until #DB delivery

When exception payloads are enabled by userspace (which is not yet
possible) and a #DB is raised in L2, defer the setting of DR6 until
later. Under VMX, this allows the L1 hypervisor to intercept the fault
before DR6 is modified. Under SVM, DR6 is modified before L1 can
intercept the fault (as has always been the case with DR7).

Note that the payload associated with a #DB exception includes only
the "new DR6 bits." When the payload is delievered, DR6.B0-B3 will be
cleared and DR6.RTM will be set prior to merging in the new DR6 bits.

Also note that bit 16 in the "new DR6 bits" is set to indicate that a
debug exception (#DB) or a breakpoint exception (#BP) occurred inside
an RTM region while advanced debugging of RTM transactional regions
was enabled. Though the reverse of DR6.RTM, this makes the #DB payload
field compatible with both the pending debug exceptions field under
VMX and the exit qualification for #DB exceptions under VMX.
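
For illustration, the delivery-time merge described above boils down to the
following helper (a minimal standalone sketch, not part of the patch;
merge_db_payload is a hypothetical name, and the mask values mirror the
kernel's DR_TRAP_BITS, DR6_RTM, and DR6_BS definitions):

	/* Sketch of the #DB payload merge into DR6. */
	#define DR_TRAP_BITS	0x0000000fUL	/* DR6.B0-B3 */
	#define DR6_RTM		0x00010000UL	/* inverted "in RTM region" bit */
	#define DR6_BS		0x00004000UL	/* single-step trap */

	static unsigned long merge_db_payload(unsigned long dr6,
					      unsigned long payload)
	{
		dr6 &= ~DR_TRAP_BITS;		/* B0-B3 are cleared first */
		dr6 |= DR6_RTM;			/* RTM is set unless cleared below */
		dr6 |= payload;			/* merge in the "new DR6 bits" */
		dr6 ^= payload & DR6_RTM;	/* payload bit 16 clears DR6.RTM */
		return dr6;
	}

For example, a single-step trap supplies payload DR6_BS, so a starting DR6 of
0xfffe0ff0 (the fixed-1 bits) comes out as 0xffff4ff0: B0-B3 clear, BS set,
and RTM set (i.e. the trap did not occur inside a transactional region).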
Reported-by: Jim Mattson <jmattson@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent da998b46
arch/x86/kvm/vmx.c
@@ -3305,22 +3305,18 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
 			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
 			return 1;
 		}
-	} else {
-		/*
-		 * FIXME: we must not write DR6 when L1 intercepts an
-		 * L2 #DB exception.
-		 */
-		if (vmcs12->exception_bitmap & (1u << nr)) {
-			if (nr == DB_VECTOR) {
-				*exit_qual = vcpu->arch.dr6;
-				*exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
-				*exit_qual ^= DR6_RTM;
-			} else {
-				*exit_qual = 0;
-			}
-			return 1;
-		}
+	} else if (vmcs12->exception_bitmap & (1u << nr)) {
+		if (nr == DB_VECTOR) {
+			if (!has_payload) {
+				payload = vcpu->arch.dr6;
+				payload &= ~(DR6_FIXED_1 | DR6_BT);
+				payload ^= DR6_RTM;
+			}
+			*exit_qual = payload;
+		} else
+			*exit_qual = 0;
+		return 1;
 	}
 
 	return 0;
 }
arch/x86/kvm/x86.c
@@ -410,6 +410,28 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
 		return;
 
 	switch (nr) {
+	case DB_VECTOR:
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
+		/*
+		 * DR6.RTM is set by all #DB exceptions that don't clear it.
+		 */
+		vcpu->arch.dr6 |= DR6_RTM;
+		vcpu->arch.dr6 |= payload;
+		/*
+		 * Bit 16 should be set in the payload whenever the #DB
+		 * exception should clear DR6.RTM. This makes the payload
+		 * compatible with the pending debug exceptions under VMX.
+		 * Though not currently documented in the SDM, this also
+		 * makes the payload compatible with the exit qualification
+		 * for #DB exceptions under VMX.
+		 */
+		vcpu->arch.dr6 ^= payload & DR6_RTM;
+		break;
 	case PF_VECTOR:
 		vcpu->arch.cr2 = payload;
 		break;
@@ -464,11 +486,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		/*
 		 * In guest mode, payload delivery should be deferred,
 		 * so that the L1 hypervisor can intercept #PF before
-		 * CR2 is modified. However, for ABI compatibility
-		 * with KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS,
-		 * we can't delay payload delivery unless userspace
-		 * has enabled this functionality via the per-VM
-		 * capability, KVM_CAP_EXCEPTION_PAYLOAD.
+		 * CR2 is modified (or intercept #DB before DR6 is
+		 * modified under nVMX). However, for ABI
+		 * compatibility with KVM_GET_VCPU_EVENTS and
+		 * KVM_SET_VCPU_EVENTS, we can't delay payload
+		 * delivery unless userspace has enabled this
+		 * functionality via the per-VM capability,
+		 * KVM_CAP_EXCEPTION_PAYLOAD.
 		 */
 		if (!vcpu->kvm->arch.exception_payload_enabled ||
 		    !is_guest_mode(vcpu))
@@ -518,6 +542,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
+static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+				  unsigned long payload)
+{
+	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
+}
+
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
 				    u32 error_code, unsigned long payload)
 {
@@ -6156,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		*r = EMULATE_USER_EXIT;
 	} else {
-		/*
-		 * "Certain debug exceptions may clear bit 0-3.  The
-		 * remaining contents of the DR6 register are never
-		 * cleared by the processor".
-		 */
-		vcpu->arch.dr6 &= ~15;
-		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-		kvm_queue_exception(vcpu, DB_VECTOR);
+		kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
 	}
 }
 
@@ -7102,11 +7125,23 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
 					     X86_EFLAGS_RF);
 
-		if (vcpu->arch.exception.nr == DB_VECTOR &&
-		    (vcpu->arch.dr7 & DR7_GD)) {
-			vcpu->arch.dr7 &= ~DR7_GD;
-			kvm_update_dr7(vcpu);
-		}
+		if (vcpu->arch.exception.nr == DB_VECTOR) {
+			/*
+			 * This code assumes that nSVM doesn't use
+			 * check_nested_events(). If it does, the
+			 * DR6/DR7 changes should happen before L1
+			 * gets a #VMEXIT for an intercepted #DB in
+			 * L2. (Under VMX, on the other hand, the
+			 * DR6/DR7 changes should not happen in the
+			 * event of a VM-exit to L1 for an intercepted
+			 * #DB in L2.)
+			 */
+			kvm_deliver_exception_payload(vcpu);
+			if (vcpu->arch.dr7 & DR7_GD) {
+				vcpu->arch.dr7 &= ~DR7_GD;
+				kvm_update_dr7(vcpu);
+			}
+		}
 
 		kvm_x86_ops->queue_exception(vcpu);
 	}
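
As a usage note, the deferral only engages once userspace opts in. A hedged
sketch of that opt-in follows (KVM_CAP_EXCEPTION_PAYLOAD is a per-VM
capability enabled through the standard KVM_ENABLE_CAP ioctl; per the commit
message, enabling it only becomes possible later in this series):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Enable exception payloads on an existing VM fd; returns 0 on success. */
	static int enable_exception_payload(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_EXCEPTION_PAYLOAD;
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}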