Commit 72d7b374 authored by Ladi Prosek, committed by Paolo Bonzini

KVM: x86: introduce ISA specific smi_allowed callback

Similar to NMI, there may be ISA-specific reasons why an SMI cannot be
injected into the guest. This commit adds a new smi_allowed callback to
be implemented in the following commits.
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0234bf88
...@@ -1062,6 +1062,7 @@ struct kvm_x86_ops { ...@@ -1062,6 +1062,7 @@ struct kvm_x86_ops {
void (*setup_mce)(struct kvm_vcpu *vcpu); void (*setup_mce)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate); int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase); int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
}; };
......
...@@ -5401,6 +5401,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu) ...@@ -5401,6 +5401,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
vcpu->arch.mcg_cap &= 0x1ff; vcpu->arch.mcg_cap &= 0x1ff;
} }
/*
 * SVM implementation of the smi_allowed callback: report whether an SMI
 * may currently be injected. This is a stub that unconditionally allows
 * injection; per the commit message, ISA-specific restrictions (e.g.
 * GIF state — TODO confirm) are added in later commits.
 */
static int svm_smi_allowed(struct kvm_vcpu *vcpu)
{
	/* No SVM-specific blocking conditions are checked yet. */
	return 1;
}
static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{ {
/* TODO: Implement */ /* TODO: Implement */
...@@ -5524,6 +5529,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { ...@@ -5524,6 +5529,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.update_pi_irte = svm_update_pi_irte, .update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce, .setup_mce = svm_setup_mce,
.smi_allowed = svm_smi_allowed,
.pre_enter_smm = svm_pre_enter_smm, .pre_enter_smm = svm_pre_enter_smm,
.pre_leave_smm = svm_pre_leave_smm, .pre_leave_smm = svm_pre_leave_smm,
}; };
......
...@@ -11916,6 +11916,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu) ...@@ -11916,6 +11916,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
~FEATURE_CONTROL_LMCE; ~FEATURE_CONTROL_LMCE;
} }
/*
 * VMX implementation of the smi_allowed callback: report whether an SMI
 * may currently be injected. This is a stub that unconditionally allows
 * injection; per the commit message, ISA-specific restrictions (e.g.
 * blocking SMIs in VMX non-root mode — TODO confirm) are added later.
 */
static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
	/* No VMX-specific blocking conditions are checked yet. */
	return 1;
}
static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{ {
/* TODO: Implement */ /* TODO: Implement */
...@@ -12054,6 +12059,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { ...@@ -12054,6 +12059,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.setup_mce = vmx_setup_mce, .setup_mce = vmx_setup_mce,
.smi_allowed = vmx_smi_allowed,
.pre_enter_smm = vmx_pre_enter_smm, .pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm, .pre_leave_smm = vmx_pre_leave_smm,
}; };
......
...@@ -6438,7 +6438,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) ...@@ -6438,7 +6438,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
} }
kvm_x86_ops->queue_exception(vcpu); kvm_x86_ops->queue_exception(vcpu);
} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) { } else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
vcpu->arch.smi_pending = false; vcpu->arch.smi_pending = false;
enter_smm(vcpu); enter_smm(vcpu);
} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment