Commit e85d3e7b authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: SVM: call KVM_REQ_GET_NESTED_STATE_PAGES on exit from SMM mode

Currently, KVM_REQ_GET_NESTED_STATE_PAGES on SVM only reloads the PDPTRs
and the MSR bitmap. The former is not really needed for SMM, as the SMM
exit code reloads the PDPTRs again from SMRAM's CR3, and the latter
happens to work because the MSR bitmap isn't modified while in SMM.

Still, it is better to be consistent with VMX.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 37687c40
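
For context, the work this commit defers is performed by the SVM
KVM_REQ_GET_NESTED_STATE_PAGES handler, which re-merges L1's MSR
permission bitmap and, when NPT is disabled, reloads the guest's PDPTRs.
A simplified, non-verbatim sketch of that handler follows; the function
and helper names mirror the upstream code of that era, but treat the
exact signatures as assumptions:

/*
 * Simplified sketch of the SVM KVM_REQ_GET_NESTED_STATE_PAGES handler
 * (svm_get_nested_state_pages() upstream); not verbatim.  It runs later
 * from the vcpu-run path, once guest memory can be accessed again.
 */
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	/*
	 * Without NPT, PAE paging needs the PDPTRs re-read through the
	 * guest CR3.  After svm_leave_smm() this is redundant, since the
	 * SMM exit code reloads them from SMRAM's CR3 anyway.
	 */
	if (!nested_npt_enabled(svm) && is_pae_paging(vcpu) &&
	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
		return false;

	/* Re-merge L1's MSR permission bitmap into vmcb02's bitmap. */
	if (!nested_svm_vmrun_msrpm(svm))
		return false;

	return true;
}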
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -579,7 +579,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-			 struct vmcb *vmcb12)
+			 struct vmcb *vmcb12, bool from_vmrun)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
@@ -609,13 +609,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
 	nested_vmcb02_prepare_save(svm, vmcb12);
 
 	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-				  nested_npt_enabled(svm), true);
+				  nested_npt_enabled(svm), from_vmrun);
 	if (ret)
 		return ret;
 
 	if (!npt_enabled)
 		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+	if (!from_vmrun)
+		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
 	svm_set_gif(svm, true);
 
 	return 0;
@@ -681,7 +684,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
 	svm->nested.nested_run_pending = 1;
 
-	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
 		goto out_exit_err;
 
 	if (nested_svm_vmrun_msrpm(svm))
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4369,7 +4369,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	 */
 
 	vmcb12 = map.hva;
 	nested_load_control_from_vmcb12(svm, &vmcb12->control);
-	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 	kvm_vcpu_unmap(vcpu, &map_save, true);
 }
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
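
For comparison, the VMX behaviour this change aligns with: on RSM,
vmx_leave_smm() re-enters guest mode via nested_vmx_enter_non_root_mode()
with from_vmentry == false, and that path already raises the same
request. A rough, abridged sketch, not verbatim upstream code:

/* Abridged sketch of the VMX analogue; unrelated details elided. */
static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.smm.guest_mode) {
		/* from_vmentry == false on the RSM path ... */
		int ret = nested_vmx_enter_non_root_mode(vcpu, false);

		if (ret)
			return ret;

		vmx->nested.smm.guest_mode = false;
	}
	return 0;
}

/* ... which makes nested_vmx_enter_non_root_mode() raise the request: */
	if (!from_vmentry)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);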