Commit 136a55c0 authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: nSVM: refactor svm_leave_smm and smm_enter_smm

Use return statements instead of nested if, and fix error
path to free all the maps that were allocated.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-2-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e85d3e7b
@@ -4285,7 +4285,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	struct kvm_host_map map_save;
 	int ret;
 
-	if (is_guest_mode(vcpu)) {
+	if (!is_guest_mode(vcpu))
+		return 0;
+
 	/* FED8h - SVM Guest */
 	put_smstate(u64, smstate, 0x7ed8, 1);
 	/* FEE0h - SVM Guest VMCB Physical Address */
@@ -4321,7 +4323,6 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 			     &svm->vmcb01.ptr->save);
 
 	kvm_vcpu_unmap(vcpu, &map_save, true);
-	}
 
 	return 0;
 }
@@ -4329,52 +4330,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map, map_save;
-	int ret = 0;
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-		struct vmcb *vmcb12;
-
-		if (guest) {
-			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-				return 1;
-
-			if (!(saved_efer & EFER_SVME))
-				return 1;
-
-			if (kvm_vcpu_map(vcpu,
-					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-				return 1;
-
-			if (svm_allocate_nested(svm))
-				return 1;
-
-			kvm_vcpu_unmap(vcpu, &map, true);
-
-			/*
-			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
-			 * used during SMM (see svm_enter_smm())
-			 */
-			if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-					 &map_save) == -EINVAL)
-				return 1;
-
-			svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-					     map_save.hva + 0x400);
-
-			/*
-			 * Enter the nested guest now
-			 */
-			vmcb12 = map.hva;
-			nested_load_control_from_vmcb12(svm, &vmcb12->control);
-			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
-
-			kvm_vcpu_unmap(vcpu, &map_save, true);
-		}
-	}
-
+	u64 saved_efer, vmcb12_gpa;
+	struct vmcb *vmcb12;
+	int ret;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+		return 0;
+
+	/* Non-zero if SMI arrived while vCPU was in guest mode. */
+	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+		return 0;
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return 1;
+
+	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+	if (!(saved_efer & EFER_SVME))
+		return 1;
+
+	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+		return 1;
+
+	ret = 1;
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+		goto unmap_map;
+
+	if (svm_allocate_nested(svm))
+		goto unmap_save;
+
+	/*
+	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
+	 * used during SMM (see svm_enter_smm())
+	 */
+	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
+
+	/*
+	 * Enter the nested guest now
+	 */
+	vmcb12 = map.hva;
+	nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+
+unmap_save:
+	kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+	kvm_vcpu_unmap(vcpu, &map, true);
 	return ret;
 }
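The error-path fix applies the usual kernel unwind idiom: acquire resources in order and, on failure, jump to a label that releases only what has already been acquired, with the success path falling through the same cleanup labels. Below is a minimal, self-contained sketch of that idiom; the function and variable names are hypothetical and plain malloc/free stand in for the KVM map helpers above — it illustrates the pattern, not the KVM code itself.

/*
 * Illustration only: hypothetical resources, not the KVM APIs above.
 * Each failure jumps to the label that frees everything acquired so far,
 * so nothing is leaked on any exit path.
 */
#include <stdlib.h>

/* Stand-in for a step that can fail after both resources are acquired. */
static int prepare(void *a, void *b)
{
	return (a && b) ? 0 : -1;
}

static int do_work(void)
{
	void *map_a, *map_b;
	int ret = 1;			/* assume failure until the work succeeds */

	map_a = malloc(64);		/* first resource */
	if (!map_a)
		return 1;		/* nothing acquired yet: plain return */

	map_b = malloc(64);		/* second resource */
	if (!map_b)
		goto free_a;		/* release only what was acquired */

	if (prepare(map_a, map_b))
		goto free_b;		/* both acquired: release both */

	/* ... real work would go here ... */
	ret = 0;

free_b:
	free(map_b);			/* success also falls through the labels */
free_a:
	free(map_a);
	return ret;
}

int main(void)
{
	return do_work();
}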