Commit f077825a authored by Paolo Bonzini

KVM: x86: API changes for SMM support

This patch includes changes to the external API for SMM support.
Userspace can predicate the availability of the new fields and
ioctls on a new capability, KVM_CAP_X86_SMM, which is added at the end
of the patch series.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a584539b
@@ -820,11 +820,21 @@ struct kvm_vcpu_events {
 	} nmi;
 	__u32 sipi_vector;
 	__u32 flags;
+	struct {
+		__u8 smm;
+		__u8 pending;
+		__u8 smm_inside_nmi;
+		__u8 latched_init;
+	} smi;
 };
 
-KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
-interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+Only two fields are defined in the flags field:
+
+- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+  interrupt.shadow contains a valid state.
+
+- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
+  smi contains a valid state.
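
For illustration only (not part of this patch): a minimal userspace sketch
that reads the new sub-struct, assuming vcpu_fd is a vcpu file descriptor
obtained with KVM_CREATE_VCPU:

	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		perror("KVM_GET_VCPU_EVENTS");
	else if ((events.flags & KVM_VCPUEVENT_VALID_SMM) && events.smi.smm)
		printf("vcpu is in system management mode\n");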
4.32 KVM_SET_VCPU_EVENTS
@@ -841,17 +851,20 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
 Fields that may be modified asynchronously by running VCPUs can be excluded
-from the update. These fields are nmi.pending and sipi_vector. Keep the
-corresponding bits in the flags field cleared to suppress overwriting the
-current in-kernel state. The bits are:
+from the update. These fields are nmi.pending, sipi_vector, smi.smm,
+smi.pending. Keep the corresponding bits in the flags field cleared to
+suppress overwriting the current in-kernel state. The bits are:
 
 KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
 KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+KVM_VCPUEVENT_VALID_SMM - transfer the smi sub-struct.
 
 If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
 the flags field to signal that interrupt.shadow contains a valid state and
 shall be written into the VCPU.
 
+KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
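
For illustration only: a hedged read-modify-write sketch that forces a vcpu
into SMM. Setting flags to KVM_VCPUEVENT_VALID_SMM alone leaves nmi.pending
and sipi_vector untouched, as described above (vcpu_fd is assumed as before):

	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		perror("KVM_GET_VCPU_EVENTS");
	events.smi.smm = 1;			/* enter SMM ... */
	events.smi.pending = 0;			/* ... with no SMI left queued */
	events.flags = KVM_VCPUEVENT_VALID_SMM;	/* transfer the smi sub-struct */
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		perror("KVM_SET_VCPU_EVENTS");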
4.33 KVM_GET_DEBUGREGS
@@ -2979,6 +2992,16 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.
 
+4.90 KVM_SMI
+
+Capability: KVM_CAP_X86_SMM
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an SMI on the thread's vcpu.
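
For example (a sketch, assuming kvm_fd is the /dev/kvm file descriptor and
vcpu_fd a vcpu file descriptor), the ioctl would typically be guarded by the
capability check:

	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) > 0 &&
	    ioctl(vcpu_fd, KVM_SMI) < 0)
		perror("KVM_SMI");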
5. The kvm_run structure
------------------------
@@ -3014,7 +3037,12 @@ an interrupt can be injected now with KVM_INTERRUPT.
 The value of the current interrupt flag.  Only valid if in-kernel
 local APIC is not used.
 
-	__u8 padding2[2];
+	__u16 flags;
+
+More architecture-specific flags detailing state of the VCPU that may
+affect the device's behavior. The only currently defined flag is
+KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
+VCPU is in system management mode.
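
A device-emulation loop might consult the flag after each exit, along these
lines (a sketch; run is assumed to be the mmap'ed kvm_run region of vcpu_fd):

	ioctl(vcpu_fd, KVM_RUN, 0);
	if (run->flags & KVM_RUN_X86_SMM) {
		/* vcpu exited while in SMM: e.g. direct this access
		 * to the SMRAM view of guest memory */
	}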
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
...
@@ -471,6 +471,7 @@ struct kvm_vcpu_arch {
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
 	bool nmi_injected;    /* Trying to inject an NMI this entry */
+	bool smi_pending;    /* SMI queued after currently running handler */
 
 	struct mtrr_state_type mtrr_state;
 	u64 pat;
@@ -1115,6 +1116,8 @@ enum {
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
+#define HF_SMM_MASK		(1 << 6)
+#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)
 
 /*
  * Hardware virtualization extension instructions may fault if a
...
@@ -106,6 +106,8 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_IOAPIC       2
 #define KVM_NR_IRQCHIPS          3
 
+#define KVM_RUN_X86_SMM		 (1 << 0)
+
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
 	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
@@ -281,6 +283,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
+#define KVM_VCPUEVENT_VALID_SMM		0x00000008
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS	0x01
@@ -309,7 +312,13 @@ struct kvm_vcpu_events {
 	} nmi;
 	__u32 sipi_vector;
 	__u32 flags;
-	__u32 reserved[10];
+	struct {
+		__u8 smm;
+		__u8 pending;
+		__u8 smm_inside_nmi;
+		__u8 latched_init;
+	} smi;
+	__u32 reserved[9];
 };
 
 /* for KVM_GET/SET_DEBUGREGS */
...
@@ -99,4 +99,9 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hflags & HF_GUEST_MASK;
 }
 
+static inline bool is_smm(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_SMM_MASK;
+}
+
 #endif
@@ -159,6 +159,11 @@ static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
 			irq->msi_redir_hint);
 }
 
+static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT,
+			&vcpu->arch.apic->pending_events);
+}
+
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
 void wait_lapic_expire(struct kvm_vcpu *vcpu);
...
@@ -3101,6 +3101,11 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -3206,8 +3211,15 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = 0; /* never valid when reporting to user space */
 
+	events->smi.smm = is_smm(vcpu);
+	events->smi.pending = vcpu->arch.smi_pending;
+	events->smi.smm_inside_nmi =
+		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
+	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
+
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SHADOW);
+			 | KVM_VCPUEVENT_VALID_SHADOW
+			 | KVM_VCPUEVENT_VALID_SMM);
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
@@ -3216,7 +3228,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 {
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
-			      | KVM_VCPUEVENT_VALID_SHADOW))
+			      | KVM_VCPUEVENT_VALID_SHADOW
+			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
 	process_nmi(vcpu);
@@ -3241,6 +3254,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    kvm_vcpu_has_lapic(vcpu))
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+		if (events->smi.smm)
+			vcpu->arch.hflags |= HF_SMM_MASK;
+		else
+			vcpu->arch.hflags &= ~HF_SMM_MASK;
+		vcpu->arch.smi_pending = events->smi.pending;
+		if (events->smi.smm_inside_nmi)
+			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
+		else
+			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
+		if (kvm_vcpu_has_lapic(vcpu)) {
+			if (events->smi.latched_init)
+				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+			else
+				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+		}
+	}
+
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 0;
@@ -3500,6 +3531,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_nmi(vcpu);
 		break;
 	}
+	case KVM_SMI: {
+		r = kvm_vcpu_ioctl_smi(vcpu);
+		break;
+	}
 	case KVM_SET_CPUID: {
 		struct kvm_cpuid __user *cpuid_arg = argp;
 		struct kvm_cpuid cpuid;
@@ -6182,6 +6217,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 
 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 	if (irqchip_in_kernel(vcpu->kvm))
...
@@ -202,7 +202,7 @@ struct kvm_run {
 	__u32 exit_reason;
 	__u8 ready_for_interrupt_injection;
 	__u8 if_flag;
-	__u8 padding2[2];
+	__u16 flags;
 
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
@@ -815,6 +815,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_S390_IRQ_STATE 114
 #define KVM_CAP_PPC_HWRNG 115
 #define KVM_CAP_DISABLE_QUIRKS 116
+#define KVM_CAP_X86_SMM 117
 
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1200,6 +1201,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_S390_IRQ_STATE */
 #define KVM_S390_SET_IRQ_STATE	_IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
 #define KVM_S390_GET_IRQ_STATE	_IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI			_IO(KVMIO, 0xb7)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
...