Commit 63129754 authored by Paolo Bonzini

KVM: SVM: Pass struct kvm_vcpu to exit handlers (and many, many other places)

Refactor the svm_exit_handlers API to pass @vcpu instead of @svm to
allow directly invoking common x86 exit handlers (in a future patch).
Opportunistically convert an absurd number of instances of 'svm->vcpu'
to direct uses of 'vcpu' to avoid pointless casting.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210205005750.3841462-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2a32a77c
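The conversion pattern is the same in every handler touched by the diff below: the exit handler now takes the common struct kvm_vcpu and, only where SVM-specific state is needed, derives the container locally via to_svm(). A minimal sketch of the before/after shape (example_interception is a hypothetical name, not a handler from this patch):

	/* Before: exit handlers received the SVM-specific struct. */
	static int example_interception(struct vcpu_svm *svm)
	{
		++svm->vcpu.stat.irq_exits;
		return kvm_emulate_halt(&svm->vcpu);
	}

	/* After: exit handlers receive the common vcpu directly. */
	static int example_interception(struct kvm_vcpu *vcpu)
	{
		++vcpu->stat.irq_exits;
		return kvm_emulate_halt(vcpu);
	}

Handlers that still need VMCB or other SVM-only state open-code "struct vcpu_svm *svm = to_svm(vcpu);" at the top, which is what lets svm_invoke_exit_handler() and the svm_exit_handlers table operate on vcpu without the svm->vcpu back-and-forth.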
@@ -270,7 +270,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
 		return -EINVAL;
-	if (!svm->vcpu.arch.apic->regs)
+	if (!vcpu->arch.apic->regs)
 		return -EINVAL;
 	if (kvm_apicv_activated(vcpu->kvm)) {
@@ -281,7 +281,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 			return ret;
 	}
-	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
+	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
 	/* Setting AVIC backing page address in the phy APIC ID table */
 	entry = avic_get_physical_id_entry(vcpu, id);
@@ -315,15 +315,16 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
 	}
 }
-int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
 	u32 icrl = svm->vmcb->control.exit_info_1;
 	u32 id = svm->vmcb->control.exit_info_2 >> 32;
 	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
-	struct kvm_lapic *apic = svm->vcpu.arch.apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
-	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
+	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
 	switch (id) {
 	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
@@ -347,11 +348,11 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		 * set the appropriate IRR bits on the valid target
		 * vcpus. So, we just need to kick the appropriate vcpu.
		 */
-		avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh);
+		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
 		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
-			  index, svm->vcpu.vcpu_id, icrh, icrl);
+			  index, vcpu->vcpu_id, icrh, icrl);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
 		WARN_ONCE(1, "Invalid backing page\n");
@@ -539,8 +540,9 @@ static bool is_avic_unaccelerated_access_trap(u32 offset)
 	return ret;
 }
-int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
+int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret = 0;
 	u32 offset = svm->vmcb->control.exit_info_1 &
 		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
@@ -550,7 +552,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
 		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
 	bool trap = is_avic_unaccelerated_access_trap(offset);
-	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
+	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
 					    trap, write, vector);
 	if (trap) {
 		/* Handling Trap */
@@ -558,7 +560,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
 		ret = avic_unaccel_trap_write(svm);
 	} else {
 		/* Handling Fault */
-		ret = kvm_emulate_instruction(&svm->vcpu, 0);
+		ret = kvm_emulate_instruction(vcpu, 0);
 	}
 	return ret;
@@ -572,7 +574,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
 	if (!avic || !irqchip_in_kernel(vcpu->kvm))
 		return 0;
-	ret = avic_init_backing_page(&svm->vcpu);
+	ret = avic_init_backing_page(vcpu);
 	if (ret)
 		return ret;
...
@@ -247,11 +247,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
-static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
 				      struct vmcb_save_area *save)
 {
-	struct kvm_vcpu *vcpu = &svm->vcpu;
 	/*
 	 * These checks are also performed by KVM_SET_SREGS,
 	 * except that EFER.LMA is not checked by SVM against
@@ -271,7 +269,7 @@ static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
 }
 /* Common checks that apply to both L1 and L2 state. */
-static bool nested_vmcb_valid_sregs(struct vcpu_svm *svm,
+static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
 				    struct vmcb_save_area *save)
 {
 	if (CC(!(save->efer & EFER_SVME)))
@@ -284,18 +282,18 @@ static bool nested_vmcb_valid_sregs(struct vcpu_svm *svm,
 	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
 		return false;
-	if (!nested_vmcb_check_cr3_cr4(svm, save))
+	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
 		return false;
-	if (CC(!kvm_valid_efer(&svm->vcpu, save->efer)))
+	if (CC(!kvm_valid_efer(vcpu, save->efer)))
 		return false;
 	return true;
 }
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
 {
-	if (!nested_vmcb_valid_sregs(svm, &vmcb12->save))
+	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save))
 		return false;
 	return nested_vmcb_check_controls(&vmcb12->control);
@@ -514,9 +512,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 	recalc_intercepts(svm);
 }
-int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
 			 struct vmcb *vmcb12)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
 	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
@@ -550,44 +549,45 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 		return ret;
 	if (!npt_enabled)
-		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
+		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 	svm_set_gif(svm, true);
 	return 0;
 }
-int nested_svm_vmrun(struct vcpu_svm *svm)
+int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int ret;
 	struct vmcb *vmcb12;
 	struct kvm_host_map map;
 	u64 vmcb12_gpa;
-	++svm->vcpu.stat.nested_run;
+	++vcpu->stat.nested_run;
-	if (is_smm(&svm->vcpu)) {
+	if (is_smm(vcpu)) {
-		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
 	vmcb12_gpa = svm->vmcb->save.rax;
-	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
 	if (ret == -EINVAL) {
-		kvm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	} else if (ret) {
-		return kvm_skip_emulated_instruction(&svm->vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
 	}
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
 	vmcb12 = map.hva;
 	if (WARN_ON_ONCE(!svm->nested.initialized))
 		return -EINVAL;
-	if (!nested_vmcb_checks(svm, vmcb12)) {
+	if (!nested_vmcb_checks(vcpu, vmcb12)) {
 		vmcb12->control.exit_code = SVM_EXIT_ERR;
 		vmcb12->control.exit_code_hi = 0;
 		vmcb12->control.exit_info_1 = 0;
@@ -597,25 +597,25 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	/* Clear internal status */
-	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_exception_queue(vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(vcpu);
 	/*
 	 * Since vmcb01 is not in use, we can use it to store some of the L1
 	 * state.
 	 */
-	svm->vmcb01.ptr->save.efer = svm->vcpu.arch.efer;
+	svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
-	svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(&svm->vcpu);
+	svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
-	svm->vmcb01.ptr->save.cr4 = svm->vcpu.arch.cr4;
+	svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
-	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(&svm->vcpu);
+	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
-	svm->vmcb01.ptr->save.rip = kvm_rip_read(&svm->vcpu);
+	svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);
 	if (!npt_enabled)
-		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(&svm->vcpu);
+		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);
 	svm->nested.nested_run_pending = 1;
-	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
+	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
 		goto out_exit_err;
 	if (nested_svm_vmrun_msrpm(svm))
@@ -632,7 +632,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	nested_svm_vmexit(svm);
 out:
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);
 	return ret;
 }
@@ -655,26 +655,27 @@ void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 int nested_svm_vmexit(struct vcpu_svm *svm)
 {
-	int rc;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	struct vmcb *vmcb12;
 	struct vmcb *vmcb = svm->vmcb;
 	struct kvm_host_map map;
+	int rc;
-	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
+	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
 	if (rc) {
 		if (rc == -EINVAL)
-			kvm_inject_gp(&svm->vcpu, 0);
+			kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vmcb12 = map.hva;
 	/* Exit Guest-Mode */
-	leave_guest_mode(&svm->vcpu);
+	leave_guest_mode(vcpu);
 	svm->nested.vmcb12_gpa = 0;
 	WARN_ON_ONCE(svm->nested.nested_run_pending);
-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 	/* in case we halted in L2 */
 	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -688,14 +689,14 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	vmcb12->save.gdtr = vmcb->save.gdtr;
 	vmcb12->save.idtr = vmcb->save.idtr;
 	vmcb12->save.efer = svm->vcpu.arch.efer;
-	vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
+	vmcb12->save.cr0 = kvm_read_cr0(vcpu);
-	vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
+	vmcb12->save.cr3 = kvm_read_cr3(vcpu);
 	vmcb12->save.cr2 = vmcb->save.cr2;
 	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
-	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
+	vmcb12->save.rflags = kvm_get_rflags(vcpu);
-	vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
+	vmcb12->save.rip = kvm_rip_read(vcpu);
-	vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
+	vmcb12->save.rsp = kvm_rsp_read(vcpu);
-	vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
+	vmcb12->save.rax = kvm_rax_read(vcpu);
 	vmcb12->save.dr7 = vmcb->save.dr7;
 	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
 	vmcb12->save.cpl = vmcb->save.cpl;
@@ -744,13 +745,13 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	/*
 	 * Restore processor state that had been saved in vmcb01
 	 */
-	kvm_set_rflags(&svm->vcpu, svm->vmcb->save.rflags);
+	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
-	svm_set_efer(&svm->vcpu, svm->vmcb->save.efer);
+	svm_set_efer(vcpu, svm->vmcb->save.efer);
-	svm_set_cr0(&svm->vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
+	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
-	svm_set_cr4(&svm->vcpu, svm->vmcb->save.cr4);
+	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
-	kvm_rax_write(&svm->vcpu, svm->vmcb->save.rax);
+	kvm_rax_write(vcpu, svm->vmcb->save.rax);
-	kvm_rsp_write(&svm->vcpu, svm->vmcb->save.rsp);
+	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
-	kvm_rip_write(&svm->vcpu, svm->vmcb->save.rip);
+	kvm_rip_write(vcpu, svm->vmcb->save.rip);
 	svm->vcpu.arch.dr7 = DR7_FIXED_1;
 	kvm_update_dr7(&svm->vcpu);
@@ -762,11 +763,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 				       vmcb12->control.exit_int_info_err,
 				       KVM_ISA_SVM);
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);
-	nested_svm_uninit_mmu_context(&svm->vcpu);
+	nested_svm_uninit_mmu_context(vcpu);
-	rc = nested_svm_load_cr3(&svm->vcpu, svm->vmcb->save.cr3, false);
+	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
 	if (rc)
 		return 1;
@@ -775,8 +776,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	 * doesn't end up in L1.
 	 */
 	svm->vcpu.arch.nmi_injected = false;
-	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_exception_queue(vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(vcpu);
 	return 0;
 }
@@ -826,17 +827,19 @@ void svm_free_nested(struct vcpu_svm *svm)
 */
 void svm_leave_nested(struct vcpu_svm *svm)
 {
-	if (is_guest_mode(&svm->vcpu)) {
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	if (is_guest_mode(vcpu)) {
 		svm->nested.nested_run_pending = 0;
-		leave_guest_mode(&svm->vcpu);
+		leave_guest_mode(vcpu);
 		svm_switch_vmcb(svm, &svm->nested.vmcb02);
-		nested_svm_uninit_mmu_context(&svm->vcpu);
+		nested_svm_uninit_mmu_context(vcpu);
 		vmcb_mark_all_dirty(svm->vmcb);
 	}
-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 }
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
@@ -945,16 +948,15 @@ int nested_svm_exit_handled(struct vcpu_svm *svm)
 	return vmexit;
 }
-int nested_svm_check_permissions(struct vcpu_svm *svm)
+int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
 {
-	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
-	    !is_paging(&svm->vcpu)) {
-		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
-	if (svm->vmcb->save.cpl) {
+	if (to_svm(vcpu)->vmcb->save.cpl) {
-		kvm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
@@ -1265,7 +1267,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	if (!(save->cr0 & X86_CR0_PG) ||
 	    !(save->cr0 & X86_CR0_PE) ||
 	    (save->rflags & X86_EFLAGS_VM) ||
-	    !nested_vmcb_valid_sregs(svm, save))
+	    !nested_vmcb_valid_sregs(vcpu, save))
 		goto out_free;
 	/*
...
@@ -1849,7 +1849,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
 		vcpu->arch.regs[VCPU_REGS_RCX] = 0;
-		ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
+		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
 		if (!ret) {
 			ret = -EINVAL;
 			break;
@@ -1899,8 +1899,9 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 	return ret;
 }
-int sev_handle_vmgexit(struct vcpu_svm *svm)
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	u64 ghcb_gpa, exit_code;
 	struct ghcb *ghcb;
@@ -1912,13 +1913,13 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 		return sev_handle_vmgexit_msr_protocol(svm);
 	if (!ghcb_gpa) {
-		vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
+		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
 		return -EINVAL;
 	}
-	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
 		/* Unable to map GHCB from guest */
-		vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
+		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
 			    ghcb_gpa);
 		return -EINVAL;
 	}
@@ -1926,7 +1927,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 	svm->ghcb = svm->ghcb_map.hva;
 	ghcb = svm->ghcb_map.hva;
-	trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);
+	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
 	exit_code = ghcb_get_sw_exit_code(ghcb);
@@ -1944,7 +1945,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
 			break;
-		ret = kvm_sev_es_mmio_read(&svm->vcpu,
+		ret = kvm_sev_es_mmio_read(vcpu,
 					   control->exit_info_1,
 					   control->exit_info_2,
 					   svm->ghcb_sa);
@@ -1953,19 +1954,19 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
 			break;
-		ret = kvm_sev_es_mmio_write(&svm->vcpu,
+		ret = kvm_sev_es_mmio_write(vcpu,
 					    control->exit_info_1,
 					    control->exit_info_2,
 					    svm->ghcb_sa);
 		break;
 	case SVM_VMGEXIT_NMI_COMPLETE:
-		ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
+		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
 		break;
 	case SVM_VMGEXIT_AP_HLT_LOOP:
-		ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+		ret = kvm_emulate_ap_reset_hold(vcpu);
 		break;
 	case SVM_VMGEXIT_AP_JUMP_TABLE: {
-		struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
 		switch (control->exit_info_1) {
 		case 0:
@@ -1990,12 +1991,12 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
 		break;
 	}
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
-		vcpu_unimpl(&svm->vcpu,
+		vcpu_unimpl(vcpu,
 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
 			    control->exit_info_1, control->exit_info_2);
 		break;
 	default:
-		ret = svm_invoke_exit_handler(svm, exit_code);
+		ret = svm_invoke_exit_handler(vcpu, exit_code);
 	}
 	return ret;
...
@@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
-			if (!is_smm(&svm->vcpu))
+			if (!is_smm(vcpu))
 				svm_free_nested(svm);
 		} else {
@@ -363,10 +363,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
 	bool has_error_code = vcpu->arch.exception.has_error_code;
 	u32 error_code = vcpu->arch.exception.error_code;
-	kvm_deliver_exception_payload(&svm->vcpu);
+	kvm_deliver_exception_payload(vcpu);
 	if (nr == BP_VECTOR && !nrips) {
-		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
+		unsigned long rip, old_rip = kvm_rip_read(vcpu);
 		/*
 		 * For guest debugging where we have to reinject #BP if some
@@ -375,8 +375,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
 		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
-		(void)skip_emulated_instruction(&svm->vcpu);
+		(void)skip_emulated_instruction(vcpu);
-		rip = kvm_rip_read(&svm->vcpu);
+		rip = kvm_rip_read(vcpu);
 		svm->int3_rip = rip + svm->vmcb->save.cs.base;
 		svm->int3_injected = rip - old_rip;
 	}
@@ -1113,12 +1113,13 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
 	}
 }
-static void init_vmcb(struct vcpu_svm *svm)
+static void init_vmcb(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	struct vmcb_save_area *save = &svm->vmcb->save;
-	svm->vcpu.arch.hflags = 0;
+	vcpu->arch.hflags = 0;
 	svm_set_intercept(svm, INTERCEPT_CR0_READ);
 	svm_set_intercept(svm, INTERCEPT_CR3_READ);
@@ -1126,7 +1127,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
 	svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
 	svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
-	if (!kvm_vcpu_apicv_active(&svm->vcpu))
+	if (!kvm_vcpu_apicv_active(vcpu))
 		svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
 	set_dr_intercepts(svm);
@@ -1170,12 +1171,12 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm_set_intercept(svm, INTERCEPT_RDPRU);
 	svm_set_intercept(svm, INTERCEPT_RSM);
-	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
+	if (!kvm_mwait_in_guest(vcpu->kvm)) {
 		svm_set_intercept(svm, INTERCEPT_MONITOR);
 		svm_set_intercept(svm, INTERCEPT_MWAIT);
 	}
-	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
+	if (!kvm_hlt_in_guest(vcpu->kvm))
 		svm_set_intercept(svm, INTERCEPT_HLT);
 	control->iopm_base_pa = __sme_set(iopm_base);
@@ -1201,19 +1202,19 @@ static void init_vmcb(struct vcpu_svm *svm)
 	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
-	svm_set_cr4(&svm->vcpu, 0);
+	svm_set_cr4(vcpu, 0);
-	svm_set_efer(&svm->vcpu, 0);
+	svm_set_efer(vcpu, 0);
 	save->dr6 = 0xffff0ff0;
-	kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED);
+	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	save->rip = 0x0000fff0;
-	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
+	vcpu->arch.regs[VCPU_REGS_RIP] = save->rip;
 	/*
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
-	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+	svm_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
-	kvm_mmu_reset_context(&svm->vcpu);
+	kvm_mmu_reset_context(vcpu);
 	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
@@ -1225,7 +1226,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 		clr_exception_intercept(svm, PF_VECTOR);
 		svm_clr_intercept(svm, INTERCEPT_CR3_READ);
 		svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
-		save->g_pat = svm->vcpu.arch.pat;
+		save->g_pat = vcpu->arch.pat;
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
@@ -1233,9 +1234,9 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm->asid = 0;
 	svm->nested.vmcb12_gpa = 0;
-	svm->vcpu.arch.hflags = 0;
+	vcpu->arch.hflags = 0;
-	if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
+	if (!kvm_pause_in_guest(vcpu->kvm)) {
 		control->pause_filter_count = pause_filter_count;
 		if (pause_filter_thresh)
 			control->pause_filter_thresh = pause_filter_thresh;
@@ -1246,7 +1247,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm_check_invpcid(svm);
-	if (kvm_vcpu_apicv_active(&svm->vcpu))
+	if (kvm_vcpu_apicv_active(vcpu))
 		avic_init_vmcb(svm);
 	/*
@@ -1265,11 +1266,11 @@ static void init_vmcb(struct vcpu_svm *svm)
 		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
 	}
-	if (sev_guest(svm->vcpu.kvm)) {
+	if (sev_guest(vcpu->kvm)) {
 		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
 		clr_exception_intercept(svm, UD_VECTOR);
-		if (sev_es_guest(svm->vcpu.kvm)) {
+		if (sev_es_guest(vcpu->kvm)) {
 			/* Perform SEV-ES specific VMCB updates */
 			sev_es_init_vmcb(svm);
 		}
@@ -1291,12 +1292,12 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	svm->virt_spec_ctrl = 0;
 	if (!init_event) {
-		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+		vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
-		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
+		if (kvm_vcpu_is_reset_bsp(vcpu))
-			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
+			vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	}
-	init_vmcb(svm);
+	init_vmcb(vcpu);
 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
 	kvm_rdx_write(vcpu, eax);
@@ -1335,7 +1336,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	if (!vmcb01_page)
 		goto out;
-	if (sev_es_guest(svm->vcpu.kvm)) {
+	if (sev_es_guest(vcpu->kvm)) {
 		/*
 		 * SEV-ES guests require a separate VMSA page used to contain
		 * the encrypted register state of the guest.
@@ -1380,12 +1381,12 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	svm->guest_state_loaded = false;
 	svm_switch_vmcb(svm, &svm->vmcb01);
-	init_vmcb(svm);
+	init_vmcb(vcpu);
 	svm_init_osvw(vcpu);
 	vcpu->arch.microcode_version = 0x01000065;
-	if (sev_es_guest(svm->vcpu.kvm))
+	if (sev_es_guest(vcpu->kvm))
 		/* Perform SEV-ES specific VMCB creation updates */
 		sev_es_create_vcpu(svm);
@@ -1448,7 +1449,7 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 	 * Save additional host state that will be restored on VMEXIT (sev-es)
	 * or subsequent vmload of host save area.
	 */
-	if (sev_es_guest(svm->vcpu.kvm)) {
+	if (sev_es_guest(vcpu->kvm)) {
 		sev_es_prepare_guest_switch(svm, vcpu->cpu);
 	} else {
 		vmsave(__sme_page_pa(sd->save_area));
@@ -1758,7 +1759,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	 * SEV-ES guests must always keep the CR intercepts cleared. CR
	 * tracking is done using the CR write traps.
	 */
-	if (sev_es_guest(svm->vcpu.kvm))
+	if (sev_es_guest(vcpu->kvm))
 		return;
 	if (hcr0 == cr0) {
@@ -1769,7 +1770,6 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		svm_set_intercept(svm, INTERCEPT_CR0_READ);
 		svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
 	}
 }
 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1897,39 +1897,43 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 }
-static int pf_interception(struct vcpu_svm *svm)
+static int pf_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 fault_address = svm->vmcb->control.exit_info_2;
 	u64 error_code = svm->vmcb->control.exit_info_1;
-	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+	return kvm_handle_page_fault(vcpu, error_code, fault_address,
 			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
 }
-static int npf_interception(struct vcpu_svm *svm)
+static int npf_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
 	u64 error_code = svm->vmcb->control.exit_info_1;
 	trace_kvm_page_fault(fault_address, error_code);
-	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+	return kvm_mmu_page_fault(vcpu, fault_address, error_code,
 			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
 }
-static int db_interception(struct vcpu_svm *svm)
+static int db_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *kvm_run = svm->vcpu.run;
+	struct kvm_run *kvm_run = vcpu->run;
-	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct vcpu_svm *svm = to_svm(vcpu);
-	if (!(svm->vcpu.guest_debug &
+	if (!(vcpu->guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
	    !svm->nmi_singlestep) {
 		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
-		kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
+		kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
 		return 1;
 	}
@@ -1939,7 +1943,7 @@ static int db_interception(struct vcpu_svm *svm)
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}
-	if (svm->vcpu.guest_debug &
+	if (vcpu->guest_debug &
 	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
@@ -1953,9 +1957,10 @@ static int db_interception(struct vcpu_svm *svm)
 	return 1;
 }
-static int bp_interception(struct vcpu_svm *svm)
+static int bp_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *kvm_run = svm->vcpu.run;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_run *kvm_run = vcpu->run;
 	kvm_run->exit_reason = KVM_EXIT_DEBUG;
 	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1963,14 +1968,14 @@ static int bp_interception(struct vcpu_svm *svm)
 	return 0;
 }
-static int ud_interception(struct vcpu_svm *svm)
+static int ud_interception(struct kvm_vcpu *vcpu)
 {
-	return handle_ud(&svm->vcpu);
+	return handle_ud(vcpu);
 }
-static int ac_interception(struct vcpu_svm *svm)
+static int ac_interception(struct kvm_vcpu *vcpu)
 {
-	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
+	kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
 	return 1;
 }
@@ -2013,7 +2018,7 @@ static bool is_erratum_383(void)
 	return true;
 }
-static void svm_handle_mce(struct vcpu_svm *svm)
+static void svm_handle_mce(struct kvm_vcpu *vcpu)
 {
 	if (is_erratum_383()) {
 		/*
@@ -2022,7 +2027,7 @@ static void svm_handle_mce(struct vcpu_svm *svm)
 		 */
 		pr_err("KVM: Guest triggered AMD Erratum 383\n");
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
+		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return;
 	}
@@ -2034,20 +2039,21 @@ static void svm_handle_mce(struct vcpu_svm *svm)
 	kvm_machine_check();
 }
-static int mc_interception(struct vcpu_svm *svm)
+static int mc_interception(struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
-static int shutdown_interception(struct vcpu_svm *svm)
+static int shutdown_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *kvm_run = svm->vcpu.run;
+	struct kvm_run *kvm_run = vcpu->run;
+	struct vcpu_svm *svm = to_svm(vcpu);
 	/*
 	 * The VM save area has already been encrypted so it
	 * cannot be reinitialized - just terminate.
	 */
-	if (sev_es_guest(svm->vcpu.kvm))
+	if (sev_es_guest(vcpu->kvm))
 		return -EINVAL;
 	/*
@@ -2055,20 +2061,20 @@ static int shutdown_interception(struct vcpu_svm *svm)
 	 * so reinitialize it.
	 */
 	clear_page(svm->vmcb);
-	init_vmcb(svm);
+	init_vmcb(vcpu);
 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
 	return 0;
 }
-static int io_interception(struct vcpu_svm *svm)
+static int io_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
 	int size, in, string;
 	unsigned port;
-	++svm->vcpu.stat.io_exits;
+	++vcpu->stat.io_exits;
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	port = io_info >> 16;
@@ -2083,93 +2089,95 @@ static int io_interception(struct vcpu_svm *svm)
 	svm->next_rip = svm->vmcb->control.exit_info_2;
-	return kvm_fast_pio(&svm->vcpu, size, port, in);
+	return kvm_fast_pio(vcpu, size, port, in);
 }
-static int nmi_interception(struct vcpu_svm *svm)
+static int nmi_interception(struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
-static int intr_interception(struct vcpu_svm *svm)
+static int intr_interception(struct kvm_vcpu *vcpu)
 {
-	++svm->vcpu.stat.irq_exits;
+	++vcpu->stat.irq_exits;
 	return 1;
 }
-static int nop_on_interception(struct vcpu_svm *svm)
+static int nop_on_interception(struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
-static int halt_interception(struct vcpu_svm *svm)
+static int halt_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_halt(&svm->vcpu);
+	return kvm_emulate_halt(vcpu);
 }
-static int vmmcall_interception(struct vcpu_svm *svm)
+static int vmmcall_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_hypercall(&svm->vcpu);
+	return kvm_emulate_hypercall(vcpu);
 }
-static int vmload_interception(struct vcpu_svm *svm)
+static int vmload_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb12;
 	struct kvm_host_map map;
 	int ret;
-	if (nested_svm_check_permissions(svm))
+	if (nested_svm_check_permissions(vcpu))
 		return 1;
-	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
 	if (ret) {
 		if (ret == -EINVAL)
-			kvm_inject_gp(&svm->vcpu, 0);
+			kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vmcb12 = map.hva;
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
 	nested_svm_vmloadsave(vmcb12, svm->vmcb);
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);
 	return ret;
 }
-static int vmsave_interception(struct vcpu_svm *svm)
+static int vmsave_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb12;
 	struct kvm_host_map map;
 	int ret;
-	if (nested_svm_check_permissions(svm))
+	if (nested_svm_check_permissions(vcpu))
 		return 1;
-	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
 	if (ret) {
 		if (ret == -EINVAL)
-			kvm_inject_gp(&svm->vcpu, 0);
+			kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vmcb12 = map.hva;
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
 	nested_svm_vmloadsave(svm->vmcb, vmcb12);
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);
 	return ret;
 }
-static int vmrun_interception(struct vcpu_svm *svm)
+static int vmrun_interception(struct kvm_vcpu *vcpu)
 {
-	if (nested_svm_check_permissions(svm))
+	if (nested_svm_check_permissions(vcpu))
 		return 1;
-	return nested_svm_vmrun(svm);
+	return nested_svm_vmrun(vcpu);
 }
 enum {
@@ -2208,7 +2216,7 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
 		[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
 		[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
 	};
-	int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
+	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
 		[SVM_INSTR_VMRUN] = vmrun_interception,
 		[SVM_INSTR_VMLOAD] = vmload_interception,
 		[SVM_INSTR_VMSAVE] = vmsave_interception,
@@ -2227,7 +2235,7 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
 			return ret;
 		return 1;
 	}
-	return svm_instr_handlers[opcode](svm);
+	return svm_instr_handlers[opcode](vcpu);
 }
 /*
@@ -2238,9 +2246,9 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
 * regions (e.g. SMM memory on host).
 * 2) VMware backdoor
 */
-static int gp_interception(struct vcpu_svm *svm)
+static int gp_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 error_code = svm->vmcb->control.exit_info_1;
 	int opcode;
@@ -2305,73 +2313,72 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
 	}
 }
-static int stgi_interception(struct vcpu_svm *svm)
+static int stgi_interception(struct kvm_vcpu *vcpu)
 {
 	int ret;
-	if (nested_svm_check_permissions(svm))
+	if (nested_svm_check_permissions(vcpu))
 		return 1;
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
-	svm_set_gif(svm, true);
+	svm_set_gif(to_svm(vcpu), true);
 	return ret;
 }
-static int clgi_interception(struct vcpu_svm *svm)
+static int clgi_interception(struct kvm_vcpu *vcpu)
 {
 	int ret;
-	if (nested_svm_check_permissions(svm))
+	if (nested_svm_check_permissions(vcpu))
 		return 1;
-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);
-	svm_set_gif(svm, false);
+	svm_set_gif(to_svm(vcpu), false);
 	return ret;
 }
-static int invlpga_interception(struct vcpu_svm *svm)
+static int invlpga_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-	trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
-			  kvm_rax_read(&svm->vcpu));
+	trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, kvm_rcx_read(vcpu),
+			  kvm_rax_read(vcpu));
 	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
-	kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
+	kvm_mmu_invlpg(vcpu, kvm_rax_read(vcpu));
-	return kvm_skip_emulated_instruction(&svm->vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
-static int skinit_interception(struct vcpu_svm *svm)
+static int skinit_interception(struct kvm_vcpu *vcpu)
 {
-	trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
+	trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
-	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;
 }
-static int wbinvd_interception(struct vcpu_svm *svm)
+static int wbinvd_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_wbinvd(&svm->vcpu);
+	return kvm_emulate_wbinvd(vcpu);
 }
-static int xsetbv_interception(struct vcpu_svm *svm)
+static int xsetbv_interception(struct kvm_vcpu *vcpu)
 {
-	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u64 new_bv = kvm_read_edx_eax(vcpu);
-	u32 index = kvm_rcx_read(&svm->vcpu);
+	u32 index = kvm_rcx_read(vcpu);
-	int err = kvm_set_xcr(&svm->vcpu, index, new_bv);
+	int err = kvm_set_xcr(vcpu, index, new_bv);
-	return kvm_complete_insn_gp(&svm->vcpu, err);
+	return kvm_complete_insn_gp(vcpu, err);
 }
-static int rdpru_interception(struct vcpu_svm *svm)
+static int rdpru_interception(struct kvm_vcpu *vcpu)
 {
-	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;
 }
-static int task_switch_interception(struct vcpu_svm *svm)
+static int task_switch_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u16 tss_selector;
 	int reason;
 	int int_type = svm->vmcb->control.exit_int_info &
@@ -2400,7 +2407,7 @@ static int task_switch_interception(struct vcpu_svm *svm)
 	if (reason == TASK_SWITCH_GATE) {
 		switch (type) {
 		case SVM_EXITINTINFO_TYPE_NMI:
-			svm->vcpu.arch.nmi_injected = false;
+			vcpu->arch.nmi_injected = false;
 			break;
 		case SVM_EXITINTINFO_TYPE_EXEPT:
 			if (svm->vmcb->control.exit_info_2 &
@@ -2409,10 +2416,10 @@ static int task_switch_interception(struct vcpu_svm *svm)
 				error_code =
 					(u32)svm->vmcb->control.exit_info_2;
 			}
-			kvm_clear_exception_queue(&svm->vcpu);
+			kvm_clear_exception_queue(vcpu);
 			break;
 		case SVM_EXITINTINFO_TYPE_INTR:
-			kvm_clear_interrupt_queue(&svm->vcpu);
+			kvm_clear_interrupt_queue(vcpu);
 			break;
 		default:
 			break;
@@ -2423,77 +2430,80 @@ static int task_switch_interception(struct vcpu_svm *svm)
 	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
 	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
 	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
-		if (!skip_emulated_instruction(&svm->vcpu))
+		if (!skip_emulated_instruction(vcpu))
 			return 0;
 	}
 	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
 		int_vec = -1;
-	return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
+	return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
 			       has_error_code, error_code);
 }
-static int cpuid_interception(struct vcpu_svm *svm)
+static int cpuid_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_cpuid(&svm->vcpu);
+	return kvm_emulate_cpuid(vcpu);
 }
-static int iret_interception(struct vcpu_svm *svm)
+static int iret_interception(struct kvm_vcpu *vcpu)
 {
-	++svm->vcpu.stat.nmi_window_exits;
-	svm->vcpu.arch.hflags |= HF_IRET_MASK;
-	if (!sev_es_guest(svm->vcpu.kvm)) {
+	struct vcpu_svm *svm = to_svm(vcpu);
+	++vcpu->stat.nmi_window_exits;
+	vcpu->arch.hflags |= HF_IRET_MASK;
+	if (!sev_es_guest(vcpu->kvm)) {
 		svm_clr_intercept(svm, INTERCEPT_IRET);
-		svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+		svm->nmi_iret_rip = kvm_rip_read(vcpu);
 	}
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 1;
 }
-static int invd_interception(struct vcpu_svm *svm)
+static int invd_interception(struct kvm_vcpu *vcpu)
 {
 	/* Treat an INVD instruction as a NOP and just skip it. */
-	return kvm_skip_emulated_instruction(&svm->vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
-static int invlpg_interception(struct vcpu_svm *svm)
+static int invlpg_interception(struct kvm_vcpu *vcpu)
 {
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-		return kvm_emulate_instruction(&svm->vcpu, 0);
+		return kvm_emulate_instruction(vcpu, 0);
-	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
-	return kvm_skip_emulated_instruction(&svm->vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
-static int emulate_on_interception(struct vcpu_svm *svm)
+static int emulate_on_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_instruction(&svm->vcpu, 0);
+	return kvm_emulate_instruction(vcpu, 0);
 }
-static int rsm_interception(struct vcpu_svm *svm)
+static int rsm_interception(struct kvm_vcpu *vcpu)
 {
-	return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
+	return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
 }
-static int rdpmc_interception(struct vcpu_svm *svm)
+static int rdpmc_interception(struct kvm_vcpu *vcpu)
 {
 	int err;
 	if (!nrips)
-		return emulate_on_interception(svm);
+		return emulate_on_interception(vcpu);
-	err = kvm_rdpmc(&svm->vcpu);
+	err = kvm_rdpmc(vcpu);
-	return kvm_complete_insn_gp(&svm->vcpu, err);
+	return kvm_complete_insn_gp(vcpu, err);
 }
-static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
+static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
 					    unsigned long val)
 {
-	unsigned long cr0 = svm->vcpu.arch.cr0;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	unsigned long cr0 = vcpu->arch.cr0;
 	bool ret = false;
-	if (!is_guest_mode(&svm->vcpu) ||
+	if (!is_guest_mode(vcpu) ||
 	    (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
 		return false;
@@ -2510,17 +2520,18 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
 #define CR_VALID (1ULL << 63)
-static int cr_interception(struct vcpu_svm *svm)
+static int cr_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int reg, cr;
 	unsigned long val;
 	int err;
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-		return emulate_on_interception(svm);
+		return emulate_on_interception(vcpu);
 	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
-		return emulate_on_interception(svm);
+		return emulate_on_interception(vcpu);
 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
 	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
@@ -2531,61 +2542,61 @@ static int cr_interception(struct vcpu_svm *svm)
 	err = 0;
 	if (cr >= 16) { /* mov to cr */
 		cr -= 16;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
-			if (!check_selective_cr0_intercepted(svm, val))
+			if (!check_selective_cr0_intercepted(vcpu, val))
-				err = kvm_set_cr0(&svm->vcpu, val);
+				err = kvm_set_cr0(vcpu, val);
 			else
 				return 1;
 			break;
 		case 3:
-			err = kvm_set_cr3(&svm->vcpu, val);
+			err = kvm_set_cr3(vcpu, val);
 			break;
 		case 4:
-			err = kvm_set_cr4(&svm->vcpu, val);
+			err = kvm_set_cr4(vcpu, val);
 			break;
 		case 8:
-			err = kvm_set_cr8(&svm->vcpu, val);
+			err = kvm_set_cr8(vcpu, val);
 			break;
 		default:
 			WARN(1, "unhandled write to CR%d", cr);
-			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			kvm_queue_exception(vcpu, UD_VECTOR);
 			return 1;
 		}
 	} else { /* mov from cr */
 		switch (cr) {
 		case 0:
-			val = kvm_read_cr0(&svm->vcpu);
+			val = kvm_read_cr0(vcpu);
 			break;
 		case 2:
-			val = svm->vcpu.arch.cr2;
+			val = vcpu->arch.cr2;
 			break;
 		case 3:
-			val = kvm_read_cr3(&svm->vcpu);
+			val = kvm_read_cr3(vcpu);
 			break;
 		case 4:
-			val = kvm_read_cr4(&svm->vcpu);
+			val = kvm_read_cr4(vcpu);
 			break;
 		case 8:
-			val = kvm_get_cr8(&svm->vcpu);
+			val = kvm_get_cr8(vcpu);
 			break;
 		default:
 			WARN(1, "unhandled read from CR%d", cr);
-			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			kvm_queue_exception(vcpu, UD_VECTOR);
 			return 1;
 		}
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
 		trace_kvm_cr_read(cr, val);
 	}
-	return kvm_complete_insn_gp(&svm->vcpu, err);
+	return kvm_complete_insn_gp(vcpu, err);
 }
-static int cr_trap(struct vcpu_svm *svm)
+static int cr_trap(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned long old_value, new_value;
 	unsigned int cr;
 	int ret = 0;
@@ -2607,7 +2618,7 @@ static int cr_trap(struct vcpu_svm *svm)
 		kvm_post_set_cr4(vcpu, old_value, new_value);
 		break;
 	case 8:
-		ret = kvm_set_cr8(&svm->vcpu, new_value);
+		ret = kvm_set_cr8(vcpu, new_value);
 		break;
 	default:
 		WARN(1, "unhandled CR%d write trap", cr);
@@ -2618,57 +2629,57 @@ static int cr_trap(struct vcpu_svm *svm)
 	return kvm_complete_insn_gp(vcpu, ret);
 }
-static int dr_interception(struct vcpu_svm *svm)
+static int dr_interception(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int reg, dr;
 	unsigned long val;
 	int err = 0;
-	if (svm->vcpu.guest_debug == 0) {
+	if (vcpu->guest_debug == 0) {
 		/*
 		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction. The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
 		clr_dr_intercepts(svm);
-		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
+		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
 		return 1;
 	}
 	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
-		return emulate_on_interception(svm);
+		return emulate_on_interception(vcpu);
 	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
 	if (dr >= 16) { /* mov to DRn */
 		dr -= 16;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_read(vcpu, reg);
-		err = kvm_set_dr(&svm->vcpu, dr, val);
+		err = kvm_set_dr(vcpu, dr, val);
 	} else {
-		kvm_get_dr(&svm->vcpu, dr, &val);
+		kvm_get_dr(vcpu, dr, &val);
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_write(vcpu, reg, val);
 	}
-	return kvm_complete_insn_gp(&svm->vcpu, err);
+	return kvm_complete_insn_gp(vcpu, err);
 }
-static int cr8_write_interception(struct vcpu_svm *svm)
+static int cr8_write_interception(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *kvm_run = svm->vcpu.run;
 	int r;
-	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+	u8 cr8_prev = kvm_get_cr8(vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	r = cr_interception(svm);
+	r = cr_interception(vcpu);
-	if (lapic_in_kernel(&svm->vcpu))
+	if (lapic_in_kernel(vcpu))
 		return r;
-	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+	if (cr8_prev <= kvm_get_cr8(vcpu))
 		return r;
-	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0; return 0;
} }
static int efer_trap(struct vcpu_svm *svm) static int efer_trap(struct kvm_vcpu *vcpu)
{ {
struct msr_data msr_info; struct msr_data msr_info;
int ret; int ret;
...@@ -2681,10 +2692,10 @@ static int efer_trap(struct vcpu_svm *svm) ...@@ -2681,10 +2692,10 @@ static int efer_trap(struct vcpu_svm *svm)
*/ */
msr_info.host_initiated = false; msr_info.host_initiated = false;
msr_info.index = MSR_EFER; msr_info.index = MSR_EFER;
msr_info.data = svm->vmcb->control.exit_info_1 & ~EFER_SVME; msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
ret = kvm_set_msr_common(&svm->vcpu, &msr_info); ret = kvm_set_msr_common(vcpu, &msr_info);
return kvm_complete_insn_gp(&svm->vcpu, ret); return kvm_complete_insn_gp(vcpu, ret);
} }
static int svm_get_msr_feature(struct kvm_msr_entry *msr) static int svm_get_msr_feature(struct kvm_msr_entry *msr)
...@@ -2810,8 +2821,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -2810,8 +2821,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
if (!sev_es_guest(svm->vcpu.kvm) || !err) if (!sev_es_guest(vcpu->kvm) || !err)
return kvm_complete_insn_gp(&svm->vcpu, err); return kvm_complete_insn_gp(vcpu, err);
ghcb_set_sw_exit_info_1(svm->ghcb, 1); ghcb_set_sw_exit_info_1(svm->ghcb, 1);
ghcb_set_sw_exit_info_2(svm->ghcb, ghcb_set_sw_exit_info_2(svm->ghcb,
...@@ -2821,9 +2832,9 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) ...@@ -2821,9 +2832,9 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
return 1; return 1;
} }
static int rdmsr_interception(struct vcpu_svm *svm) static int rdmsr_interception(struct kvm_vcpu *vcpu)
{ {
return kvm_emulate_rdmsr(&svm->vcpu); return kvm_emulate_rdmsr(vcpu);
} }
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
...@@ -3009,38 +3020,37 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) ...@@ -3009,38 +3020,37 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
return 0; return 0;
} }
static int wrmsr_interception(struct vcpu_svm *svm) static int wrmsr_interception(struct kvm_vcpu *vcpu)
{ {
return kvm_emulate_wrmsr(&svm->vcpu); return kvm_emulate_wrmsr(vcpu);
} }
static int msr_interception(struct vcpu_svm *svm) static int msr_interception(struct kvm_vcpu *vcpu)
{ {
if (svm->vmcb->control.exit_info_1) if (to_svm(vcpu)->vmcb->control.exit_info_1)
return wrmsr_interception(svm); return wrmsr_interception(vcpu);
else else
return rdmsr_interception(svm); return rdmsr_interception(vcpu);
} }
static int interrupt_window_interception(struct vcpu_svm *svm) static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{ {
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
svm_clear_vintr(svm); svm_clear_vintr(to_svm(vcpu));
/* /*
* For AVIC, the only reason to end up here is ExtINTs. * For AVIC, the only reason to end up here is ExtINTs.
* In this case AVIC was temporarily disabled for * In this case AVIC was temporarily disabled for
* requesting the IRQ window and we have to re-enable it. * requesting the IRQ window and we have to re-enable it.
*/ */
svm_toggle_avic_for_irq_window(&svm->vcpu, true); svm_toggle_avic_for_irq_window(vcpu, true);
++svm->vcpu.stat.irq_window_exits; ++vcpu->stat.irq_window_exits;
return 1; return 1;
} }
static int pause_interception(struct vcpu_svm *svm) static int pause_interception(struct kvm_vcpu *vcpu)
{ {
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel; bool in_kernel;
/* /*
...@@ -3048,7 +3058,7 @@ static int pause_interception(struct vcpu_svm *svm) ...@@ -3048,7 +3058,7 @@ static int pause_interception(struct vcpu_svm *svm)
* vcpu->arch.preempted_in_kernel can never be true. Just * vcpu->arch.preempted_in_kernel can never be true. Just
* set in_kernel to false as well. * set in_kernel to false as well.
*/ */
in_kernel = !sev_es_guest(svm->vcpu.kvm) && svm_get_cpl(vcpu) == 0; in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
if (!kvm_pause_in_guest(vcpu->kvm)) if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu); grow_ple_window(vcpu);
...@@ -3057,26 +3067,26 @@ static int pause_interception(struct vcpu_svm *svm) ...@@ -3057,26 +3067,26 @@ static int pause_interception(struct vcpu_svm *svm)
return 1; return 1;
} }
static int nop_interception(struct vcpu_svm *svm) static int nop_interception(struct kvm_vcpu *vcpu)
{ {
return kvm_skip_emulated_instruction(&(svm->vcpu)); return kvm_skip_emulated_instruction(vcpu);
} }
static int monitor_interception(struct vcpu_svm *svm) static int monitor_interception(struct kvm_vcpu *vcpu)
{ {
printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
return nop_interception(svm); return nop_interception(vcpu);
} }
static int mwait_interception(struct vcpu_svm *svm) static int mwait_interception(struct kvm_vcpu *vcpu)
{ {
printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
return nop_interception(svm); return nop_interception(vcpu);
} }
static int invpcid_interception(struct vcpu_svm *svm) static int invpcid_interception(struct kvm_vcpu *vcpu)
{ {
struct kvm_vcpu *vcpu = &svm->vcpu; struct vcpu_svm *svm = to_svm(vcpu);
unsigned long type; unsigned long type;
gva_t gva; gva_t gva;
...@@ -3101,7 +3111,7 @@ static int invpcid_interception(struct vcpu_svm *svm) ...@@ -3101,7 +3111,7 @@ static int invpcid_interception(struct vcpu_svm *svm)
return kvm_handle_invpcid(vcpu, type, gva); return kvm_handle_invpcid(vcpu, type, gva);
} }
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_READ_CR0] = cr_interception, [SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception, [SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception, [SVM_EXIT_READ_CR4] = cr_interception,
...@@ -3312,24 +3322,24 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) ...@@ -3312,24 +3322,24 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
return -EINVAL; return -EINVAL;
} }
int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code) int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{ {
if (svm_handle_invalid_exit(&svm->vcpu, exit_code)) if (svm_handle_invalid_exit(vcpu, exit_code))
return 0; return 0;
#ifdef CONFIG_RETPOLINE #ifdef CONFIG_RETPOLINE
if (exit_code == SVM_EXIT_MSR) if (exit_code == SVM_EXIT_MSR)
return msr_interception(svm); return msr_interception(vcpu);
else if (exit_code == SVM_EXIT_VINTR) else if (exit_code == SVM_EXIT_VINTR)
return interrupt_window_interception(svm); return interrupt_window_interception(vcpu);
else if (exit_code == SVM_EXIT_INTR) else if (exit_code == SVM_EXIT_INTR)
return intr_interception(svm); return intr_interception(vcpu);
else if (exit_code == SVM_EXIT_HLT) else if (exit_code == SVM_EXIT_HLT)
return halt_interception(svm); return halt_interception(vcpu);
else if (exit_code == SVM_EXIT_NPF) else if (exit_code == SVM_EXIT_NPF)
return npf_interception(svm); return npf_interception(vcpu);
#endif #endif
return svm_exit_handlers[exit_code](svm); return svm_exit_handlers[exit_code](vcpu);
} }
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
...@@ -3398,7 +3408,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) ...@@ -3398,7 +3408,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (exit_fastpath != EXIT_FASTPATH_NONE) if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1; return 1;
return svm_invoke_exit_handler(svm, exit_code); return svm_invoke_exit_handler(vcpu, exit_code);
} }
static void reload_tss(struct kvm_vcpu *vcpu) static void reload_tss(struct kvm_vcpu *vcpu)
...@@ -3409,9 +3419,10 @@ static void reload_tss(struct kvm_vcpu *vcpu) ...@@ -3409,9 +3419,10 @@ static void reload_tss(struct kvm_vcpu *vcpu)
load_TR_desc(); load_TR_desc();
} }
static void pre_svm_run(struct vcpu_svm *svm) static void pre_svm_run(struct kvm_vcpu *vcpu)
{ {
struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu); struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
/* /*
* If the previous vmrun of the vmcb occurred on * If the previous vmrun of the vmcb occurred on
...@@ -3419,14 +3430,14 @@ static void pre_svm_run(struct vcpu_svm *svm) ...@@ -3419,14 +3430,14 @@ static void pre_svm_run(struct vcpu_svm *svm)
* and assign a new asid. * and assign a new asid.
*/ */
if (unlikely(svm->current_vmcb->cpu != svm->vcpu.cpu)) { if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
svm->current_vmcb->asid_generation = 0; svm->current_vmcb->asid_generation = 0;
vmcb_mark_all_dirty(svm->vmcb); vmcb_mark_all_dirty(svm->vmcb);
svm->current_vmcb->cpu = svm->vcpu.cpu; svm->current_vmcb->cpu = vcpu->cpu;
} }
if (sev_guest(svm->vcpu.kvm)) if (sev_guest(vcpu->kvm))
return pre_sev_run(svm, svm->vcpu.cpu); return pre_sev_run(svm, vcpu->cpu);
/* FIXME: handle wraparound of asid_generation */ /* FIXME: handle wraparound of asid_generation */
if (svm->current_vmcb->asid_generation != sd->asid_generation) if (svm->current_vmcb->asid_generation != sd->asid_generation)
...@@ -3439,7 +3450,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) ...@@ -3439,7 +3450,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK; vcpu->arch.hflags |= HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm)) if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET); svm_set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections; ++vcpu->stat.nmi_injections;
} }
...@@ -3493,7 +3504,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu) ...@@ -3493,7 +3504,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
return false; return false;
ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
(svm->vcpu.arch.hflags & HF_NMI_MASK); (vcpu->arch.hflags & HF_NMI_MASK);
return ret; return ret;
} }
...@@ -3513,9 +3524,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) ...@@ -3513,9 +3524,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); return !!(vcpu->arch.hflags & HF_NMI_MASK);
return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
} }
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
...@@ -3523,12 +3532,12 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) ...@@ -3523,12 +3532,12 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
if (masked) { if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK; vcpu->arch.hflags |= HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm)) if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET); svm_set_intercept(svm, INTERCEPT_IRET);
} else { } else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK; vcpu->arch.hflags &= ~HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm)) if (!sev_es_guest(vcpu->kvm))
svm_clr_intercept(svm, INTERCEPT_IRET); svm_clr_intercept(svm, INTERCEPT_IRET);
} }
} }
...@@ -3541,7 +3550,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) ...@@ -3541,7 +3550,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm)) if (!gif_set(svm))
return true; return true;
if (sev_es_guest(svm->vcpu.kvm)) { if (sev_es_guest(vcpu->kvm)) {
/* /*
* SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
* bit to determine the state of the IF flag. * bit to determine the state of the IF flag.
...@@ -3610,8 +3619,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) ...@@ -3610,8 +3619,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */ return; /* IRET will cause a vm exit */
if (!gif_set(svm)) { if (!gif_set(svm)) {
...@@ -3690,8 +3698,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) ...@@ -3690,8 +3698,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
} }
static void svm_complete_interrupts(struct vcpu_svm *svm) static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu);
u8 vector; u8 vector;
int type; int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info; u32 exitintinfo = svm->vmcb->control.exit_int_info;
...@@ -3703,28 +3712,28 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) ...@@ -3703,28 +3712,28 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
* If we've made progress since setting HF_IRET_MASK, we've * If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection. * executed an IRET and can allow NMI injection.
*/ */
if ((svm->vcpu.arch.hflags & HF_IRET_MASK) && if ((vcpu->arch.hflags & HF_IRET_MASK) &&
(sev_es_guest(svm->vcpu.kvm) || (sev_es_guest(vcpu->kvm) ||
kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) { kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
} }
svm->vcpu.arch.nmi_injected = false; vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu); kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(&svm->vcpu); kvm_clear_interrupt_queue(vcpu);
if (!(exitintinfo & SVM_EXITINTINFO_VALID)) if (!(exitintinfo & SVM_EXITINTINFO_VALID))
return; return;
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
switch (type) { switch (type) {
case SVM_EXITINTINFO_TYPE_NMI: case SVM_EXITINTINFO_TYPE_NMI:
svm->vcpu.arch.nmi_injected = true; vcpu->arch.nmi_injected = true;
break; break;
case SVM_EXITINTINFO_TYPE_EXEPT: case SVM_EXITINTINFO_TYPE_EXEPT:
/* /*
...@@ -3740,21 +3749,20 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) ...@@ -3740,21 +3749,20 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
*/ */
if (kvm_exception_is_soft(vector)) { if (kvm_exception_is_soft(vector)) {
if (vector == BP_VECTOR && int3_injected && if (vector == BP_VECTOR && int3_injected &&
kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) kvm_is_linear_rip(vcpu, svm->int3_rip))
kvm_rip_write(&svm->vcpu, kvm_rip_write(vcpu,
kvm_rip_read(&svm->vcpu) - kvm_rip_read(vcpu) - int3_injected);
int3_injected);
break; break;
} }
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err; u32 err = svm->vmcb->control.exit_int_info_err;
kvm_requeue_exception_e(&svm->vcpu, vector, err); kvm_requeue_exception_e(vcpu, vector, err);
} else } else
kvm_requeue_exception(&svm->vcpu, vector); kvm_requeue_exception(vcpu, vector);
break; break;
case SVM_EXITINTINFO_TYPE_INTR: case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(&svm->vcpu, vector, false); kvm_queue_interrupt(vcpu, vector, false);
break; break;
default: default:
break; break;
...@@ -3769,7 +3777,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) ...@@ -3769,7 +3777,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
control->exit_int_info = control->event_inj; control->exit_int_info = control->event_inj;
control->exit_int_info_err = control->event_inj_err; control->exit_int_info_err = control->event_inj_err;
control->event_inj = 0; control->event_inj = 0;
svm_complete_interrupts(svm); svm_complete_interrupts(vcpu);
} }
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
...@@ -3781,9 +3789,10 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) ...@@ -3781,9 +3789,10 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE; return EXIT_FASTPATH_NONE;
} }
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm)
{ {
struct vcpu_svm *svm = to_svm(vcpu);
/* /*
* VMENTER enables interrupts (host state), but the kernel state is * VMENTER enables interrupts (host state), but the kernel state is
* interrupts disabled when this is invoked. Also tell RCU about * interrupts disabled when this is invoked. Also tell RCU about
...@@ -3804,12 +3813,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, ...@@ -3804,12 +3813,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_enter_irqoff(); guest_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0); lockdep_hardirqs_on(CALLER_ADDR0);
if (sev_es_guest(svm->vcpu.kvm)) { if (sev_es_guest(vcpu->kvm)) {
__svm_sev_es_vcpu_run(svm->vmcb_pa); __svm_sev_es_vcpu_run(svm->vmcb_pa);
} else { } else {
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&vcpu->arch.regs);
vmload(__sme_page_pa(sd->save_area)); vmload(__sme_page_pa(sd->save_area));
} }
...@@ -3860,7 +3869,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3860,7 +3869,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
smp_send_reschedule(vcpu->cpu); smp_send_reschedule(vcpu->cpu);
} }
pre_svm_run(svm); pre_svm_run(vcpu);
sync_lapic_to_cr8(vcpu); sync_lapic_to_cr8(vcpu);
...@@ -3874,7 +3883,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3874,7 +3883,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
* Run with all-zero DR6 unless needed, so that we can get the exact cause * Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB. * of a #DB.
*/ */
if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
svm_set_dr6(svm, vcpu->arch.dr6); svm_set_dr6(svm, vcpu->arch.dr6);
else else
svm_set_dr6(svm, DR6_ACTIVE_LOW); svm_set_dr6(svm, DR6_ACTIVE_LOW);
...@@ -3892,7 +3901,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3892,7 +3901,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
*/ */
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
svm_vcpu_enter_exit(vcpu, svm); svm_vcpu_enter_exit(vcpu);
/* /*
* We do not use IBRS in the kernel. If this vCPU has used the * We do not use IBRS in the kernel. If this vCPU has used the
...@@ -3912,12 +3921,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3912,12 +3921,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
if (!sev_es_guest(svm->vcpu.kvm)) if (!sev_es_guest(vcpu->kvm))
reload_tss(vcpu); reload_tss(vcpu);
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
if (!sev_es_guest(svm->vcpu.kvm)) { if (!sev_es_guest(vcpu->kvm)) {
vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
...@@ -3925,7 +3934,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3925,7 +3934,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
} }
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(&svm->vcpu); kvm_before_interrupt(vcpu);
kvm_load_host_xsave_state(vcpu); kvm_load_host_xsave_state(vcpu);
stgi(); stgi();
...@@ -3933,12 +3942,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3933,12 +3942,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
/* Any pending NMI will happen here */ /* Any pending NMI will happen here */
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu); kvm_after_interrupt(vcpu);
sync_cr8_to_lapic(vcpu); sync_cr8_to_lapic(vcpu);
svm->next_rip = 0; svm->next_rip = 0;
if (is_guest_mode(&svm->vcpu)) { if (is_guest_mode(vcpu)) {
nested_sync_control_from_vmcb02(svm); nested_sync_control_from_vmcb02(svm);
svm->nested.nested_run_pending = 0; svm->nested.nested_run_pending = 0;
} }
...@@ -3948,7 +3957,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3948,7 +3957,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
/* if exit due to PF check for async PF */ /* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
svm->vcpu.arch.apf.host_apf_flags = vcpu->arch.apf.host_apf_flags =
kvm_read_and_reset_apf_flags(); kvm_read_and_reset_apf_flags();
if (npt_enabled) { if (npt_enabled) {
...@@ -3962,9 +3971,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3962,9 +3971,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
*/ */
if (unlikely(svm->vmcb->control.exit_code == if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR)) SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm); svm_handle_mce(vcpu);
svm_complete_interrupts(svm); svm_complete_interrupts(vcpu);
if (is_guest_mode(vcpu)) if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE; return EXIT_FASTPATH_NONE;
...@@ -4063,7 +4072,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) ...@@ -4063,7 +4072,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
/* Update nrips enabled cache */ /* Update nrips enabled cache */
svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
/* Check again if INVPCID interception if required */ /* Check again if INVPCID interception if required */
svm_check_invpcid(svm); svm_check_invpcid(svm);
...@@ -4364,15 +4373,15 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ...@@ -4364,15 +4373,15 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (!(saved_efer & EFER_SVME)) if (!(saved_efer & EFER_SVME))
return 1; return 1;
if (kvm_vcpu_map(&svm->vcpu, if (kvm_vcpu_map(vcpu,
gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
return 1; return 1;
if (svm_allocate_nested(svm)) if (svm_allocate_nested(svm))
return 1; return 1;
ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva); ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva);
kvm_vcpu_unmap(&svm->vcpu, &map, true); kvm_vcpu_unmap(vcpu, &map, true);
} }
} }
......
...@@ -405,7 +405,7 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu); ...@@ -405,7 +405,7 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu); bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu); bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value); void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code); int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write); int read, int write);
...@@ -437,15 +437,15 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) ...@@ -437,15 +437,15 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
} }
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, struct vmcb *vmcb12); int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm); void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm); int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb); void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm); int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm); int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm); int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code); bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm); int nested_svm_exit_special(struct vcpu_svm *svm);
...@@ -492,8 +492,8 @@ void avic_vm_destroy(struct kvm *kvm); ...@@ -492,8 +492,8 @@ void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm); int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm); void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate); void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm); int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm); int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm); int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu); void avic_vcpu_put(struct kvm_vcpu *vcpu);
...@@ -566,7 +566,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu); ...@@ -566,7 +566,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void); void __init sev_hardware_setup(void);
void sev_hardware_teardown(void); void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu); void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct vcpu_svm *svm); int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in); int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm); void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm); void sev_es_create_vcpu(struct vcpu_svm *svm);
......