Commit 136a55c0 authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: nSVM: refactor svm_leave_smm and smm_enter_smm

Use return statements instead of nested ifs, and fix the error
path to free all the maps that were allocated.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-2-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e85d3e7b
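
The shape of the rework — bail out with early returns instead of nesting, set a pessimistic error code, then fall through goto labels so that every successful mapping is released on every exit path — can be sketched outside of KVM. Below is a minimal, self-contained illustration in plain C; acquire(), release(), allocate_extra() and do_work() are hypothetical stand-ins for kvm_vcpu_map(), kvm_vcpu_unmap(), svm_allocate_nested() and the actual SMM-exit work, not KVM APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the KVM calls used by the real function. */
static void *acquire(size_t n)        { return malloc(n); }
static void release(void *p)          { free(p); }
static int  allocate_extra(void)      { return 0; }
static int  do_work(void *a, void *b) { return (a && b) ? 0 : 1; }

static int leave_smm_like(int feature_present)
{
        void *map, *map_save;
        int ret;

        /* Early return replaces the old "if (feature) { ... }" nesting. */
        if (!feature_present)
                return 0;

        map = acquire(16);
        if (!map)
                return 1;       /* nothing acquired yet that needs undoing */

        ret = 1;                /* pessimistic default before taking more resources */
        map_save = acquire(16);
        if (!map_save)
                goto unmap_map; /* only 'map' must be released */

        if (allocate_extra())
                goto unmap_save; /* both mappings must be released */

        ret = do_work(map, map_save);

        /* Success and failure both fall through the cleanup labels. */
unmap_save:
        release(map_save);
unmap_map:
        release(map);
        return ret;
}

int main(void)
{
        printf("feature present: ret=%d\n", leave_smm_like(1));
        printf("feature absent:  ret=%d\n", leave_smm_like(0));
        return 0;
}

The success path deliberately falls through both labels, so each release exists exactly once and runs in reverse order of acquisition — the property the commit message refers to when it says the error path now frees all the maps that were allocated.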
@@ -4285,43 +4285,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	struct kvm_host_map map_save;
 	int ret;
 
-	if (is_guest_mode(vcpu)) {
-		/* FED8h - SVM Guest */
-		put_smstate(u64, smstate, 0x7ed8, 1);
-		/* FEE0h - SVM Guest VMCB Physical Address */
-		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	if (!is_guest_mode(vcpu))
+		return 0;
 
-		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+	/* FED8h - SVM Guest */
+	put_smstate(u64, smstate, 0x7ed8, 1);
+	/* FEE0h - SVM Guest VMCB Physical Address */
+	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-		ret = nested_svm_vmexit(svm);
-		if (ret)
-			return ret;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-		/*
-		 * KVM uses VMCB01 to store L1 host state while L2 runs but
-		 * VMCB01 is going to be used during SMM and thus the state will
-		 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
-		 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-		 * format of the area is identical to guest save area offsetted
-		 * by 0x400 (matches the offset of 'struct vmcb_save_area'
-		 * within 'struct vmcb'). Note: HSAVE area may also be used by
-		 * L1 hypervisor to save additional host context (e.g. KVM does
-		 * that, see svm_prepare_guest_switch()) which must be
-		 * preserved.
-		 */
-		if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-				 &map_save) == -EINVAL)
-			return 1;
+	ret = nested_svm_vmexit(svm);
+	if (ret)
+		return ret;
+
+	/*
+	 * KVM uses VMCB01 to store L1 host state while L2 runs but
+	 * VMCB01 is going to be used during SMM and thus the state will
+	 * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
+	 * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+	 * format of the area is identical to guest save area offsetted
+	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
+	 * within 'struct vmcb'). Note: HSAVE area may also be used by
+	 * L1 hypervisor to save additional host context (e.g. KVM does
+	 * that, see svm_prepare_guest_switch()) which must be
+	 * preserved.
+	 */
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+			 &map_save) == -EINVAL)
+		return 1;
 
-		BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-		svm_copy_vmrun_state(map_save.hva + 0x400,
-				     &svm->vmcb01.ptr->save);
+	svm_copy_vmrun_state(map_save.hva + 0x400,
+			     &svm->vmcb01.ptr->save);
 
-		kvm_vcpu_unmap(vcpu, &map_save, true);
-	}
+	kvm_vcpu_unmap(vcpu, &map_save, true);
 
 	return 0;
 }
@@ -4329,52 +4330,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map, map_save;
-	int ret = 0;
+	u64 saved_efer, vmcb12_gpa;
+	struct vmcb *vmcb12;
+	int ret;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-		struct vmcb *vmcb12;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+		return 0;
 
-		if (guest) {
-			if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-				return 1;
+	/* Non-zero if SMI arrived while vCPU was in guest mode. */
+	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+		return 0;
 
-			if (!(saved_efer & EFER_SVME))
-				return 1;
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return 1;
 
-			if (kvm_vcpu_map(vcpu,
-					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-				return 1;
+	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+	if (!(saved_efer & EFER_SVME))
+		return 1;
 
-			if (svm_allocate_nested(svm))
-				return 1;
+	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+		return 1;
 
-			kvm_vcpu_unmap(vcpu, &map, true);
+	ret = 1;
+	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+		goto unmap_map;
 
-			/*
-			 * Restore L1 host state from L1 HSAVE area as VMCB01 was
-			 * used during SMM (see svm_enter_smm())
-			 */
-			if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-					 &map_save) == -EINVAL)
-				return 1;
+	if (svm_allocate_nested(svm))
+		goto unmap_save;
 
-			svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-					     map_save.hva + 0x400);
+	/*
+	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
+	 * used during SMM (see svm_enter_smm())
+	 */
+	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-			/*
-			 * Enter the nested guest now
-			 */
-			vmcb12 = map.hva;
-			nested_load_control_from_vmcb12(svm, &vmcb12->control);
-			ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+	/*
+	 * Enter the nested guest now
+	 */
+	vmcb12 = map.hva;
+	nested_load_control_from_vmcb12(svm, &vmcb12->control);
+	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
-			kvm_vcpu_unmap(vcpu, &map_save, true);
-		}
-	}
+unmap_save:
+	kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+	kvm_vcpu_unmap(vcpu, &map, true);
 
 	return ret;
 }