Commit 8c5fbf1a authored by KarimAllah Ahmed, committed by Paolo Bonzini

KVM/nSVM: Use the new mapping API for mapping guest memory

Use the new mapping API for mapping guest memory to avoid depending on
"struct page".
Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 42e35f80
...@@ -3071,32 +3071,6 @@ static inline bool nested_svm_nmi(struct vcpu_svm *svm) ...@@ -3071,32 +3071,6 @@ static inline bool nested_svm_nmi(struct vcpu_svm *svm)
return false; return false;
} }
/*
 * Map the guest page containing @gpa so the host can access a nested
 * SVM structure (e.g. the nested VMCB).
 *
 * On success, stores the backing struct page in *_page and returns the
 * kernel virtual address obtained via kmap(); the caller must release
 * the mapping with nested_svm_unmap().  On failure, injects #GP(0) into
 * the guest and returns NULL.
 */
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *pg;

	/* Resolving the gfn may fault pages in and thus sleep. */
	might_sleep();

	pg = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(pg)) {
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}

	*_page = pg;
	return kmap(pg);
}
/*
 * Undo a successful nested_svm_map(): tear down the kmap() mapping and
 * drop the page reference.  kvm_release_page_dirty() also marks the page
 * dirty, since the caller may have written to the mapped structure.
 */
static void nested_svm_unmap(struct page *page)
{
kunmap(page);
kvm_release_page_dirty(page);
}
static int nested_svm_intercept_ioio(struct vcpu_svm *svm) static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{ {
unsigned port, size, iopm_len; unsigned port, size, iopm_len;
...@@ -3299,10 +3273,11 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr ...@@ -3299,10 +3273,11 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
static int nested_svm_vmexit(struct vcpu_svm *svm) static int nested_svm_vmexit(struct vcpu_svm *svm)
{ {
int rc;
struct vmcb *nested_vmcb; struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave; struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb; struct vmcb *vmcb = svm->vmcb;
struct page *page; struct kvm_host_map map;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_1,
...@@ -3311,9 +3286,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -3311,9 +3286,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info_err, vmcb->control.exit_int_info_err,
KVM_ISA_SVM); KVM_ISA_SVM);
nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(svm->nested.vmcb), &map);
if (!nested_vmcb) if (rc) {
if (rc == -EINVAL)
kvm_inject_gp(&svm->vcpu, 0);
return 1; return 1;
}
nested_vmcb = map.hva;
/* Exit Guest-Mode */ /* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu); leave_guest_mode(&svm->vcpu);
...@@ -3417,7 +3397,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -3417,7 +3397,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
mark_all_dirty(svm->vmcb); mark_all_dirty(svm->vmcb);
nested_svm_unmap(page); kvm_vcpu_unmap(&svm->vcpu, &map, true);
nested_svm_uninit_mmu_context(&svm->vcpu); nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_reset_context(&svm->vcpu);
...@@ -3483,7 +3463,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb) ...@@ -3483,7 +3463,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
} }
static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
struct vmcb *nested_vmcb, struct page *page) struct vmcb *nested_vmcb, struct kvm_host_map *map)
{ {
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK; svm->vcpu.arch.hflags |= HF_HIF_MASK;
...@@ -3567,7 +3547,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, ...@@ -3567,7 +3547,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->control.pause_filter_thresh = svm->vmcb->control.pause_filter_thresh =
nested_vmcb->control.pause_filter_thresh; nested_vmcb->control.pause_filter_thresh;
nested_svm_unmap(page); kvm_vcpu_unmap(&svm->vcpu, map, true);
/* Enter Guest-Mode */ /* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu); enter_guest_mode(&svm->vcpu);
...@@ -3587,17 +3567,23 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, ...@@ -3587,17 +3567,23 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
static bool nested_svm_vmrun(struct vcpu_svm *svm) static bool nested_svm_vmrun(struct vcpu_svm *svm)
{ {
int rc;
struct vmcb *nested_vmcb; struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave; struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb; struct vmcb *vmcb = svm->vmcb;
struct page *page; struct kvm_host_map map;
u64 vmcb_gpa; u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax; vmcb_gpa = svm->vmcb->save.rax;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(vmcb_gpa), &map);
if (!nested_vmcb) if (rc) {
if (rc == -EINVAL)
kvm_inject_gp(&svm->vcpu, 0);
return false; return false;
}
nested_vmcb = map.hva;
if (!nested_vmcb_checks(nested_vmcb)) { if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR; nested_vmcb->control.exit_code = SVM_EXIT_ERR;
...@@ -3605,7 +3591,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) ...@@ -3605,7 +3591,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
nested_vmcb->control.exit_info_1 = 0; nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0; nested_vmcb->control.exit_info_2 = 0;
nested_svm_unmap(page); kvm_vcpu_unmap(&svm->vcpu, &map, true);
return false; return false;
} }
...@@ -3649,7 +3635,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) ...@@ -3649,7 +3635,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
copy_vmcb_control_area(hsave, vmcb); copy_vmcb_control_area(hsave, vmcb);
enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page); enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
return true; return true;
} }
...@@ -3673,21 +3659,26 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) ...@@ -3673,21 +3659,26 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm) static int vmload_interception(struct vcpu_svm *svm)
{ {
struct vmcb *nested_vmcb; struct vmcb *nested_vmcb;
struct page *page; struct kvm_host_map map;
int ret; int ret;
if (nested_svm_check_permissions(svm)) if (nested_svm_check_permissions(svm))
return 1; return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
if (!nested_vmcb) if (ret) {
if (ret == -EINVAL)
kvm_inject_gp(&svm->vcpu, 0);
return 1; return 1;
}
nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu); ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb); nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
nested_svm_unmap(page); kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret; return ret;
} }
...@@ -3695,21 +3686,26 @@ static int vmload_interception(struct vcpu_svm *svm) ...@@ -3695,21 +3686,26 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm) static int vmsave_interception(struct vcpu_svm *svm)
{ {
struct vmcb *nested_vmcb; struct vmcb *nested_vmcb;
struct page *page; struct kvm_host_map map;
int ret; int ret;
if (nested_svm_check_permissions(svm)) if (nested_svm_check_permissions(svm))
return 1; return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
if (!nested_vmcb) if (ret) {
if (ret == -EINVAL)
kvm_inject_gp(&svm->vcpu, 0);
return 1; return 1;
}
nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu); ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb); nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
nested_svm_unmap(page); kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret; return ret;
} }
...@@ -6236,7 +6232,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ...@@ -6236,7 +6232,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb; struct vmcb *nested_vmcb;
struct page *page; struct kvm_host_map map;
u64 guest; u64 guest;
u64 vmcb; u64 vmcb;
...@@ -6244,10 +6240,10 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ...@@ -6244,10 +6240,10 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vmcb = GET_SMSTATE(u64, smstate, 0x7ee0); vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest) { if (guest) {
nested_vmcb = nested_svm_map(svm, vmcb, &page); if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
if (!nested_vmcb)
return 1; return 1;
enter_svm_guest_mode(svm, vmcb, nested_vmcb, page); nested_vmcb = map.hva;
enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
} }
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment