Commit a7662aa5 authored by Paolo Bonzini

KVM: x86: do not define SMM-related constants if SMM disabled

The hidden processor flags HF_SMM_MASK and HF_SMM_INSIDE_NMI_MASK
are not needed if CONFIG_KVM_SMM is turned off.  Remove the
definitions altogether and the code that uses them.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 85672346
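
For readers unfamiliar with the pattern, the sketch below (not part of the patch) shows how a Kconfig-gated flag bit and its users fit together: the bits exist only when the option is selected, and anything that tests them must either sit under the same #ifdef or collapse to a constant. CONFIG_KVM_SMM is defined locally so the sketch builds standalone, and is_smm() here is only loosely modeled on KVM's helper of that name, which takes a struct kvm_vcpu and lives in smm.h.

/*
 * Illustrative sketch only, not kernel code: a Kconfig-gated flag bit
 * and a user that still compiles when the bit does not exist.
 */
#include <stdio.h>

#define CONFIG_KVM_SMM 1                /* comment out to mimic CONFIG_KVM_SMM=n */

#define HF_GUEST_MASK          (1 << 5)
#ifdef CONFIG_KVM_SMM
#define HF_SMM_MASK            (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK (1 << 7)
#endif

static int is_smm(unsigned int hflags)
{
#ifdef CONFIG_KVM_SMM
        return !!(hflags & HF_SMM_MASK);  /* the bit exists, test it */
#else
        (void)hflags;
        return 0;                         /* SMM compiled out: never in SMM */
#endif
}

int main(void)
{
        printf("is_smm(0x40) = %d\n", is_smm(0x40));
        return 0;
}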
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1997,10 +1997,11 @@ enum {
 #define HF_NMI_MASK (1 << 3)
 #define HF_IRET_MASK (1 << 4)
 #define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
+
+#ifdef CONFIG_KVM_SMM
 #define HF_SMM_MASK (1 << 6)
 #define HF_SMM_INSIDE_NMI_MASK (1 << 7)
 
-#ifdef CONFIG_KVM_SMM
 # define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
 # define KVM_ADDRESS_SPACE_NUM 2
 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
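
Context for the hunk above: with SMM enabled, x86 KVM keeps two memslot address spaces, and kvm_arch_vcpu_memslots_id() uses HF_SMM_MASK to send a vCPU that is in SMM to address space 1. The standalone sketch below restates just that selection; describe_slot_set() is a made-up stand-in for the real memslot lookup.

/* Standalone sketch of the selection done by kvm_arch_vcpu_memslots_id();
 * describe_slot_set() is a hypothetical stand-in for the memslot machinery. */
#include <stdio.h>

#define HF_SMM_MASK           (1 << 6)
#define KVM_ADDRESS_SPACE_NUM 2

static int memslots_id(unsigned int hflags)
{
        return (hflags & HF_SMM_MASK) ? 1 : 0;   /* SMM vCPUs use space 1 */
}

static const char *describe_slot_set(int as_id)
{
        static const char *const sets[KVM_ADDRESS_SPACE_NUM] = {
                "regular guest memslots",
                "SMM-only memslots",
        };
        return sets[as_id];
}

int main(void)
{
        printf("outside SMM -> %s\n", describe_slot_set(memslots_id(0)));
        printf("inside SMM  -> %s\n", describe_slot_set(memslots_id(HF_SMM_MASK)));
        return 0;
}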
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -278,7 +278,6 @@ enum x86emul_mode {
 /* These match some of the HF_* flags defined in kvm_host.h */
 #define X86EMUL_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
 #define X86EMUL_SMM_MASK (1 << 6)
-#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)
 
 /*
  * fastop functions are declared as taking a never-defined fastop parameter,
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -10,6 +10,8 @@
 
 void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 {
+	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+
 	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
 
 	if (entering_smm) {
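
The BUILD_BUG_ON() added here takes over from the ones removed from init_emulate_ctxt() in the x86.c hunk further down: smm.c is only built when CONFIG_KVM_SMM=y, so HF_SMM_MASK can be referenced here without an #ifdef. The standalone sketch below shows the same compile-time cross-check, with C11 _Static_assert standing in for the kernel's BUILD_BUG_ON().

/* Standalone sketch of the compile-time cross-check; _Static_assert plays
 * the role of the kernel's BUILD_BUG_ON(). */
#define HF_SMM_MASK      (1 << 6)   /* hidden-flag bit used by KVM proper */
#define X86EMUL_SMM_MASK (1 << 6)   /* the emulator's copy of the same bit */

/* The emulator defines its own copies of some HF_* bits, so the values must
 * stay numerically identical; a mismatch now fails the build instead of
 * silently misbehaving at run time. */
_Static_assert(HF_SMM_MASK == X86EMUL_SMM_MASK,
               "HF_SMM_MASK and X86EMUL_SMM_MASK must match");

int main(void)
{
        return 0;
}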
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5093,10 +5093,12 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	/* events->sipi_vector is never valid when reporting to user space */
 
+#ifdef CONFIG_KVM_SMM
 	events->smi.smm = is_smm(vcpu);
 	events->smi.pending = vcpu->arch.smi_pending;
 	events->smi.smm_inside_nmi =
 		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
+#endif
 	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
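
One userspace-visible effect of the #ifdef above: with CONFIG_KVM_SMM=n the smi.smm, smi.pending and smi.smm_inside_nmi fields of struct kvm_vcpu_events are no longer filled in, so KVM_GET_VCPU_EVENTS should report them as zero. The sketch below shows the reading side; vcpu_fd is assumed to be an already set-up KVM vCPU file descriptor, with the usual /dev/kvm, VM and vCPU creation omitted.

/* Userspace-side sketch of reading the state reported by the hunk above. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int report_smm_state(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        memset(&events, 0, sizeof(events));
        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0) {
                perror("KVM_GET_VCPU_EVENTS");
                return -1;
        }

        /* With CONFIG_KVM_SMM=n the kernel no longer fills these in, so
         * they should read back as zero. */
        printf("in SMM: %u, SMI pending: %u, SMM inside NMI: %u\n",
               events.smi.smm, events.smi.pending, events.smi.smm_inside_nmi);
        return 0;
}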
@@ -8267,8 +8269,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 		    cs_db	? X86EMUL_MODE_PROT32 :
 				  X86EMUL_MODE_PROT16;
 	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
-	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
-	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
 
 	ctxt->interruptibility = 0;
 	ctxt->have_exception = false;