Commit cf7316d0 authored by Paolo Bonzini

KVM: x86: do not define KVM_REQ_SMI if SMM disabled

This ensures that all the relevant code is compiled out; in fact,
the process_smi stub can be removed too.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-9-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ba97bb07
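
For context, here is a minimal standalone sketch (not kernel code) of the compile-out pattern this commit relies on: the feature-specific request bit is defined only when the feature is configured in, and every site that touches it is wrapped in the same #ifdef, so a build without the feature contains none of that code and needs no stub handler. CONFIG_FEATURE_SMM, REQ_SMI, handle_smi() and run_vcpu() are hypothetical names used only for illustration; the real change uses CONFIG_KVM_SMM, KVM_REQ_SMI and process_smi() as shown in the hunks below.

/*
 * Illustrative sketch only: a request bit that exists only when the
 * feature is built in, with every user guarded by the same #ifdef.
 */
#include <stdio.h>

#define REQ_NMI   (1u << 0)
#ifdef CONFIG_FEATURE_SMM
#define REQ_SMI   (1u << 1)   /* only defined when the feature is built in */
#endif

struct vcpu { unsigned int requests; };

/* Test-and-clear a pending request bit. */
static int check_request(struct vcpu *v, unsigned int req)
{
	if (v->requests & req) {
		v->requests &= ~req;
		return 1;
	}
	return 0;
}

static void handle_nmi(struct vcpu *v) { (void)v; printf("NMI handled\n"); }

#ifdef CONFIG_FEATURE_SMM
static void handle_smi(struct vcpu *v) { (void)v; printf("SMI handled\n"); }
#endif

static void run_vcpu(struct vcpu *v)
{
#ifdef CONFIG_FEATURE_SMM
	/* Compiled out entirely when the feature is disabled. */
	if (check_request(v, REQ_SMI))
		handle_smi(v);
#endif
	if (check_request(v, REQ_NMI))
		handle_nmi(v);
}

int main(void)
{
	struct vcpu v = { .requests = REQ_NMI };

	run_vcpu(&v);   /* build with -DCONFIG_FEATURE_SMM to include the SMI path */
	return 0;
}

Without the configuration symbol, any unguarded reference to the request bit fails to compile, which is exactly why the call sites below gain #ifdef CONFIG_KVM_SMM guards and why the process_smi() stub becomes dead code that can be dropped.
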
@@ -81,7 +81,9 @@
 #define KVM_REQ_NMI			KVM_ARCH_REQ(9)
 #define KVM_REQ_PMU			KVM_ARCH_REQ(10)
 #define KVM_REQ_PMI			KVM_ARCH_REQ(11)
+#ifdef CONFIG_KVM_SMM
 #define KVM_REQ_SMI			KVM_ARCH_REQ(12)
+#endif
 #define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
 #define KVM_REQ_MCLOCK_INPROGRESS \
 	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
...
@@ -27,7 +27,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
-static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 
 /*
  * emulator_leave_smm is used as a function pointer, so the
...
@@ -5026,8 +5026,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	process_nmi(vcpu);
 
+#ifdef CONFIG_KVM_SMM
 	if (kvm_check_request(KVM_REQ_SMI, vcpu))
 		process_smi(vcpu);
+#endif
 
 	/*
 	 * KVM's ABI only allows for one exception to be migrated.  Luckily,
@@ -10266,8 +10268,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
 			record_steal_time(vcpu);
+#ifdef CONFIG_KVM_SMM
 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
 			process_smi(vcpu);
+#endif
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -12628,7 +12632,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
 		return true;
 
 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
 	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
 	    kvm_test_request(KVM_REQ_EVENT, vcpu))
 		return true;
...