Commit 59073aaf authored by Jim Mattson, committed by Paolo Bonzini

kvm: x86: Add exception payload fields to kvm_vcpu_events

The per-VM capability KVM_CAP_EXCEPTION_PAYLOAD (to be introduced in a
later commit) adds the following fields to struct kvm_vcpu_events:
exception_has_payload, exception_payload, and exception.pending.

With this capability set, all of the details of vcpu->arch.exception,
including the payload for a pending exception, are reported to
userspace in response to KVM_GET_VCPU_EVENTS.

With this capability clear, the original ABI is preserved, and the
exception.injected field is set for either pending or injected
exceptions.
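
To make the two GET-side behaviors concrete, here is a minimal userspace
sketch (illustrative only, not part of this commit). It assumes an open
vCPU file descriptor vcpu_fd and a kernel carrying this change; the
helper name is hypothetical.

#include <err.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: print the exception state KVM reports. */
static void report_exception_state(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_GET_VCPU_EVENTS");

	if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
		/* New ABI: pending and injected are reported separately. */
		if (events.exception.pending)
			printf("pending exception %u\n", events.exception.nr);
		if (events.exception.injected)
			printf("injected exception %u\n", events.exception.nr);
		if (events.exception_has_payload)
			printf("payload %#llx\n",
			       (unsigned long long)events.exception_payload);
	} else {
		/* Original ABI: exception.injected covers both states. */
		if (events.exception.injected)
			printf("exception %u in flight\n", events.exception.nr);
	}
}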

When userspace calls KVM_SET_VCPU_EVENTS with
KVM_CAP_EXCEPTION_PAYLOAD clear, exception.injected is no longer
translated to exception.pending. KVM_SET_VCPU_EVENTS can now only
establish a pending exception when KVM_CAP_EXCEPTION_PAYLOAD is set.
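
The SET side, again as a hedged sketch rather than part of this commit:
the capability must first be enabled on the VM, and the code below
assumes the KVM_CAP_EXCEPTION_PAYLOAD constant that the later commit
introduces. The page-fault vector and error-code value are purely
illustrative; without the capability enabled, KVM_VCPUEVENT_VALID_PAYLOAD
in the flags field is rejected with -EINVAL.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumes KVM_CAP_EXCEPTION_PAYLOAD comes from the later commit. */
static int enable_exception_payload(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_EXCEPTION_PAYLOAD };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

/* Hypothetical helper: queue a pending #PF whose payload is the CR2 value. */
static int set_pending_page_fault(int vcpu_fd, __u64 fault_gva)
{
	struct kvm_vcpu_events events;

	/* Start from the current state so unrelated fields survive. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = 1;
	events.exception.injected = 0;
	events.exception.nr = 14;		/* #PF */
	events.exception.has_error_code = 1;
	events.exception.error_code = 0x2;	/* illustrative: write fault */
	events.exception_has_payload = 1;
	events.exception_payload = fault_gva;	/* #PF payload = CR2 */

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}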
Reported-by: Jim Mattson <jmattson@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c851436a
@@ -850,7 +850,7 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 has_error_code;
-		__u8 pad;
+		__u8 pending;
 		__u32 error_code;
 	} exception;
 	struct {
@@ -873,16 +873,23 @@ struct kvm_vcpu_events {
 		__u8 smm_inside_nmi;
 		__u8 latched_init;
 	} smi;
-	__u32 reserved[9];
+	__u8 reserved[27];
+	__u8 exception_has_payload;
+	__u64 exception_payload;
 };
 
-Only two fields are defined in the flags field:
+The following bits are defined in the flags field:
 
-- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
-  interrupt.shadow contains a valid state.
+- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that
+  interrupt.shadow contains a valid state.
 
-- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
-  smi contains a valid state.
+- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a
+  valid state.
+
+- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the
+  exception_has_payload, exception_payload, and exception.pending
+  fields contain a valid state. This bit will be set whenever
+  KVM_CAP_EXCEPTION_PAYLOAD is enabled.
 
 ARM/ARM64:
@@ -962,6 +969,11 @@ shall be written into the VCPU.
 
 KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
 
+If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD
+can be set in the flags field to signal that the
+exception_has_payload, exception_payload, and exception.pending fields
+contain a valid state and shall be written into the VCPU.
+
 ARM/ARM64:
 
 Set the pending SError exception state for this VCPU. It is not possible to
...
@@ -919,6 +919,7 @@ struct kvm_arch {
 	bool x2apic_broadcast_quirk_disabled;
 
 	bool guest_can_read_msr_platform_info;
+	bool exception_payload_enabled;
 };
 
 struct kvm_vm_stat {
...
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
 #define KVM_VCPUEVENT_VALID_SMM		0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD	0x00000010
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS	0x01
@@ -299,7 +300,7 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 has_error_code;
-		__u8 pad;
+		__u8 pending;
 		__u32 error_code;
 	} exception;
 	struct {
@@ -322,7 +323,9 @@ struct kvm_vcpu_events {
 		__u8 smm_inside_nmi;
 		__u8 latched_init;
 	} smi;
-	__u32 reserved[9];
+	__u8 reserved[27];
+	__u8 exception_has_payload;
+	__u64 exception_payload;
 };
 
 /* for KVM_GET/SET_DEBUGREGS */
...
@@ -3373,19 +3373,33 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 					       struct kvm_vcpu_events *events)
 {
 	process_nmi(vcpu);
+
 	/*
-	 * FIXME: pass injected and pending separately.  This is only
-	 * needed for nested virtualization, whose state cannot be
-	 * migrated yet.  For now we can combine them.
+	 * The API doesn't provide the instruction length for software
+	 * exceptions, so don't report them. As long as the guest RIP
+	 * isn't advanced, we should expect to encounter the exception
+	 * again.
 	 */
-	events->exception.injected =
-		(vcpu->arch.exception.pending ||
-		 vcpu->arch.exception.injected) &&
-		!kvm_exception_is_soft(vcpu->arch.exception.nr);
+	if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
+		events->exception.injected = 0;
+		events->exception.pending = 0;
+	} else {
+		events->exception.injected = vcpu->arch.exception.injected;
+		events->exception.pending = vcpu->arch.exception.pending;
+		/*
+		 * For ABI compatibility, deliberately conflate
+		 * pending and injected exceptions when
+		 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
+		 */
+		if (!vcpu->kvm->arch.exception_payload_enabled)
+			events->exception.injected |=
+				vcpu->arch.exception.pending;
+	}
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
-	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
+	events->exception_has_payload = vcpu->arch.exception.has_payload;
+	events->exception_payload = vcpu->arch.exception.payload;
 
 	events->interrupt.injected =
 		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
@@ -3409,6 +3423,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SHADOW
 			 | KVM_VCPUEVENT_VALID_SMM);
+	if (vcpu->kvm->arch.exception_payload_enabled)
+		events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
+
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
@@ -3420,12 +3437,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			      | KVM_VCPUEVENT_VALID_SHADOW
-			      | KVM_VCPUEVENT_VALID_SMM))
+			      | KVM_VCPUEVENT_VALID_SMM
+			      | KVM_VCPUEVENT_VALID_PAYLOAD))
 		return -EINVAL;
 
-	if (events->exception.injected &&
-	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
-	     is_guest_mode(vcpu)))
+	if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
+		if (!vcpu->kvm->arch.exception_payload_enabled)
+			return -EINVAL;
+		if (events->exception.pending)
+			events->exception.injected = 0;
+		else
+			events->exception_has_payload = 0;
+	} else {
+		events->exception.pending = 0;
+		events->exception_has_payload = 0;
+	}
+
+	if ((events->exception.injected || events->exception.pending) &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
 		return -EINVAL;
 
 	/* INITs are latched while in SMM */
@@ -3435,13 +3464,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	process_nmi(vcpu);
-	vcpu->arch.exception.injected = false;
-	vcpu->arch.exception.pending = events->exception.injected;
+	vcpu->arch.exception.injected = events->exception.injected;
+	vcpu->arch.exception.pending = events->exception.pending;
 	vcpu->arch.exception.nr = events->exception.nr;
 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
 	vcpu->arch.exception.error_code = events->exception.error_code;
-	vcpu->arch.exception.has_payload = false;
-	vcpu->arch.exception.payload = 0;
+	vcpu->arch.exception.has_payload = events->exception_has_payload;
+	vcpu->arch.exception.payload = events->exception_payload;
 
 	vcpu->arch.interrupt.injected = events->interrupt.injected;
 	vcpu->arch.interrupt.nr = events->interrupt.nr;
...
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
 #define KVM_VCPUEVENT_VALID_SMM		0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD	0x00000010
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS	0x01
@@ -299,7 +300,10 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 has_error_code;
-		__u8 pad;
+		union {
+			__u8 pad;
+			__u8 pending;
+		};
 		__u32 error_code;
 	} exception;
 	struct {
@@ -322,7 +326,9 @@ struct kvm_vcpu_events {
 		__u8 smm_inside_nmi;
 		__u8 latched_init;
 	} smi;
-	__u32 reserved[9];
+	__u8 reserved[27];
+	__u8 exception_has_payload;
+	__u64 exception_payload;
 };
 
 /* for KVM_GET/SET_DEBUGREGS */
...