Commit ed19321f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Load SMRAM in a single shot when leaving SMM

RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Rather than dance around the issue of HF_SMM_MASK being set
when loading SMSTATE into architectural state, ideally RSM emulation
itself would be reworked to clear HF_SMM_MASK prior to loading non-SMM
architectural state.

Ostensibly, the only motivation for having HF_SMM_MASK set throughout
the loading of state from the SMRAM save state area is so that the
memory accesses from GET_SMSTATE() are tagged with role.smm.  Load
all of the SMRAM save state area from guest memory at the beginning of
RSM emulation, and load state from the buffer instead of reading guest
memory one-by-one.
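
As a rough illustration of that flow, here is a minimal, self-contained sketch (not KVM code: read_guest_phys_t, rsm_load_state_sketch(), fake_read() and the example addresses are hypothetical stand-ins, while the GET_SMSTATE macro and the 0x7ed8/0x7ee0 offsets come from the diff below). The point is simply that guest memory is touched once, to fill a local 512-byte copy of the save-state area, and every later field load indexes that copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* GET_SMSTATE as added to kvm_host.h: index a local copy of the SMRAM
 * state-save area using save-state offsets in the 0x7e00..0x7fff range. */
#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

/* Hypothetical stand-in for "read bytes of guest physical memory". */
typedef int (*read_guest_phys_t)(uint64_t gpa, void *dst, unsigned int bytes);

static int rsm_load_state_sketch(read_guest_phys_t read_guest_phys, uint64_t smbase)
{
	char buf[512];

	/* One bulk read of the 512-byte state-save area at SMBASE + 0xfe00;
	 * this is the only access that still needs SMM tagging. */
	if (read_guest_phys(smbase + 0xfe00, buf, sizeof(buf)))
		return -1;

	/* Every subsequent field load hits the local copy, e.g. the two
	 * fields svm_pre_leave_smm() pulls out in the diff below. */
	printf("svm guest flag: %#llx\n",
	       (unsigned long long)GET_SMSTATE(uint64_t, buf, 0x7ed8));
	printf("vmcb gpa:       %#llx\n",
	       (unsigned long long)GET_SMSTATE(uint64_t, buf, 0x7ee0));
	return 0;
}

/* Toy backing store and reader so the sketch actually runs. */
static char fake_guest_mem[0x20000];

static int fake_read(uint64_t gpa, void *dst, unsigned int bytes)
{
	memcpy(dst, fake_guest_mem + gpa, bytes);
	return 0;
}

int main(void)
{
	uint64_t smbase = 0x8000;		/* arbitrary example SMBASE */
	uint64_t vmcb_gpa = 0x12345000;

	/* Plant a VMCB address where the save area keeps it (SMBASE + 0xfee0). */
	memcpy(fake_guest_mem + smbase + 0xfee0, &vmcb_gpa, sizeof(vmcb_gpa));

	return rsm_load_state_sketch(fake_read, smbase);
}
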

This paves the way for clearing HF_SMM_MASK prior to loading state,
and also aligns RSM with the enter_smm() behavior, which fills a
buffer and writes SMRAM save state in a single go.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e51bfdb6
arch/x86/include/asm/kvm_emulate.h

@@ -226,7 +226,8 @@ struct x86_emulate_ops {
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+			     const char *smstate);
 
 };
arch/x86/include/asm/kvm_host.h

@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);

@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val) \
 	*(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
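
Taken together with the existing put_smstate() right above it, the new macro gives the RSM path a read accessor that mirrors what the enter_smm() path uses to fill the buffer: both treat the buffer as a 512-byte copy of the state-save area and subtract the 0x7e00 bias from the save-state offset. A tiny standalone check of that symmetry (ordinary userspace C, not kernel code; the value 0x12345000 is arbitrary):

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t u64;	/* stand-in for the kernel's u64 */

/* The two macros from kvm_host.h above. */
#define put_smstate(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val
#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

int main(void)
{
	char buf[512];

	memset(buf, 0, sizeof(buf));
	put_smstate(u64, buf, 0x7ee0, 0x12345000ull);		/* enter_smm() side */
	assert(GET_SMSTATE(u64, buf, 0x7ee0) == 0x12345000ull);	/* RSM side */
	return 0;
}
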
(The diff for arch/x86/kvm/emulate.c, the bulk of the RSM emulation rework, is collapsed and not shown here.)
arch/x86/kvm/svm.c

@@ -6232,27 +6232,23 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *nested_vmcb;
 	struct page *page;
-	struct {
-		u64 guest;
-		u64 vmcb;
-	} svm_state_save;
+	u64 guest;
+	u64 vmcb;
 	int ret;
 
-	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-				  sizeof(svm_state_save));
-	if (ret)
-		return ret;
+	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-	if (svm_state_save.guest) {
+	if (guest) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
+		nested_vmcb = nested_svm_map(svm, vmcb, &page);
 		if (nested_vmcb)
-			enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
+			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 		else
 			ret = 1;
 		vcpu->arch.hflags |= HF_SMM_MASK;
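
One way to convince yourself the two versions above read the same guest bytes: the deleted kvm_vcpu_read_guest() call fetched the two u64s starting at smbase + 0xfed8, while the new code indexes a buffer that, judging by the -0x7e00 bias in GET_SMSTATE, its caller fills starting at smbase + 0xfe00. Plain arithmetic, not kernel code, with an arbitrary example SMBASE:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t smbase = 0x30000;		/* arbitrary example SMBASE */

	uint64_t old_gpa = smbase + 0xfed8;	/* deleted kvm_vcpu_read_guest() target */
	uint64_t buf_base = smbase + 0xfe00;	/* assumed start of the smstate copy */
	uint64_t new_gpa = buf_base + (0x7ed8 - 0x7e00);	/* GET_SMSTATE index 0xd8 */

	assert(old_gpa == new_gpa);		/* same field, now read from a local buffer */
	return 0;
}
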
arch/x86/kvm/vmx/vmx.c

@@ -7398,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
arch/x86/kvm/x86.c

@@ -5963,9 +5963,10 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+				  const char *smstate)
 {
-	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
 }
 
 static const struct x86_emulate_ops emulate_ops = {