Commit 8fc78909 authored by Emanuele Giuseppe Esposito, committed by Paolo Bonzini

KVM: nSVM: introduce struct vmcb_ctrl_area_cached

This structure will replace vmcb_control_area in
svm_nested_state, providing only the fields that are actually
used by the nested state. This avoids carrying and copying
around uninitialized fields. The cost, however, is that
functions which expect the old structure (in this case
vmcb_is_intercept) need to be duplicated for the cached type.
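Concretely, the duplication amounts to a second inline predicate that
takes the cached type; this is a condensed sketch mirroring the svm.h
hunk in the diff below:

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

Callers that test intercepts on svm->nested.ctl switch from
vmcb_is_intercept() to vmcb12_is_intercept().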

In addition, in svm_get_nested_state() userspace expects a
vmcb_control_area struct, so all cached fields need to be
copied back into a temporary structure before it is copied
to userspace.
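Condensed from the svm_get_nested_state() hunk below, the copy-back
path allocates a zeroed vmcb_control_area, fills it from the cache,
and frees it after the copy to userspace:

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;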
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211103140527.752797-7-eesposit@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bd95926c
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -58,7 +58,8 @@ static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_excep
 	struct vcpu_svm *svm = to_svm(vcpu);
 	WARN_ON(!is_guest_mode(vcpu));
 
-	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
+	if (vmcb12_is_intercept(&svm->nested.ctl,
+				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
 	    !svm->nested.nested_run_pending) {
 		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
 		svm->vmcb->control.exit_code_hi = 0;
@@ -121,7 +122,8 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 void recalc_intercepts(struct vcpu_svm *svm)
 {
-	struct vmcb_control_area *c, *h, *g;
+	struct vmcb_control_area *c, *h;
+	struct vmcb_ctrl_area_cached *g;
 	unsigned int i;
 
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
@@ -172,7 +174,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 	 */
 	int i;
 
-	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
+	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
 		return true;
 
 	for (i = 0; i < MSRPM_OFFSETS; i++) {
@@ -220,9 +222,9 @@ static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
 }
 
 static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
-					 struct vmcb_control_area *control)
+					 struct vmcb_ctrl_area_cached *control)
 {
-	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
+	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
 		return false;
 
 	if (CC(control->asid == 0))
@@ -290,13 +292,13 @@ static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
 static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb_control_area *ctl = &svm->nested.ctl;
+	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
 
 	return __nested_vmcb_check_controls(vcpu, ctl);
 }
 
 static
-void __nested_copy_vmcb_control_to_cache(struct vmcb_control_area *to,
+void __nested_copy_vmcb_control_to_cache(struct vmcb_ctrl_area_cached *to,
 					 struct vmcb_control_area *from)
 {
 	unsigned int i;
@@ -1006,7 +1008,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	u32 offset, msr, value;
 	int write, mask;
 
-	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
+	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
 		return NESTED_EXIT_HOST;
 
 	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
@@ -1033,7 +1035,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 	u8 start_bit;
 	u64 gpa;
 
-	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
+	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
 		return NESTED_EXIT_HOST;
 
 	port = svm->vmcb->control.exit_info_1 >> 16;
@@ -1064,12 +1066,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		vmexit = nested_svm_intercept_ioio(svm);
 		break;
 	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
-		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
-		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -1087,7 +1089,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		break;
 	}
 	default: {
-		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
 			vmexit = NESTED_EXIT_DONE;
 	}
 	}
@@ -1165,7 +1167,7 @@ static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
 
 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
 {
-	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
+	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
 }
 
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
@@ -1269,11 +1271,47 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
 	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
 }
 
+/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
+static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
+					      struct vmcb_ctrl_area_cached *from)
+{
+	unsigned int i;
+
+	memset(dst, 0, sizeof(struct vmcb_control_area));
+
+	for (i = 0; i < MAX_INTERCEPT; i++)
+		dst->intercepts[i] = from->intercepts[i];
+
+	dst->iopm_base_pa = from->iopm_base_pa;
+	dst->msrpm_base_pa = from->msrpm_base_pa;
+	dst->tsc_offset = from->tsc_offset;
+	dst->asid = from->asid;
+	dst->tlb_ctl = from->tlb_ctl;
+	dst->int_ctl = from->int_ctl;
+	dst->int_vector = from->int_vector;
+	dst->int_state = from->int_state;
+	dst->exit_code = from->exit_code;
+	dst->exit_code_hi = from->exit_code_hi;
+	dst->exit_info_1 = from->exit_info_1;
+	dst->exit_info_2 = from->exit_info_2;
+	dst->exit_int_info = from->exit_int_info;
+	dst->exit_int_info_err = from->exit_int_info_err;
+	dst->nested_ctl = from->nested_ctl;
+	dst->event_inj = from->event_inj;
+	dst->event_inj_err = from->event_inj_err;
+	dst->nested_cr3 = from->nested_cr3;
+	dst->virt_ext = from->virt_ext;
+	dst->pause_filter_count = from->pause_filter_count;
+	dst->pause_filter_thresh = from->pause_filter_thresh;
+}
+
 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
 				struct kvm_nested_state __user *user_kvm_nested_state,
 				u32 user_data_size)
 {
 	struct vcpu_svm *svm;
+	struct vmcb_control_area *ctl;
+	unsigned long r;
 	struct kvm_nested_state kvm_state = {
 		.flags = 0,
 		.format = KVM_STATE_NESTED_FORMAT_SVM,
@@ -1315,9 +1353,18 @@ static int svm_get_nested_state(struct kvm_vcpu *vcpu,
 	 */
 	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
 		return -EFAULT;
-	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
-			 sizeof(user_vmcb->control)))
+
+	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	if (!ctl)
+		return -ENOMEM;
+
+	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
+	r = copy_to_user(&user_vmcb->control, ctl,
+			 sizeof(user_vmcb->control));
+	kfree(ctl);
+	if (r)
 		return -EFAULT;
+
 	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
 			 sizeof(user_vmcb->save)))
 		return -EFAULT;
@@ -1335,6 +1382,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	struct vmcb_control_area *ctl;
 	struct vmcb_save_area *save;
 	struct vmcb_save_area_cached save_cached;
+	struct vmcb_ctrl_area_cached ctl_cached;
 	unsigned long cr0;
 	int ret;
 
@@ -1387,7 +1435,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		goto out_free;
 
 	ret = -EINVAL;
-	if (!__nested_vmcb_check_controls(vcpu, ctl))
+	__nested_copy_vmcb_control_to_cache(&ctl_cached, ctl);
+	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
 		goto out_free;
 
 	/*
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2508,7 +2508,7 @@ static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
 	bool ret = false;
 
 	if (!is_guest_mode(vcpu) ||
-	    (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
+	    (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
 		return false;
 
 	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
@@ -4215,7 +4215,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 		    info->intercept == x86_intercept_clts)
 			break;
 
-		if (!(vmcb_is_intercept(&svm->nested.ctl,
+		if (!(vmcb12_is_intercept(&svm->nested.ctl,
 					INTERCEPT_SELECTIVE_CR0)))
 			break;
 
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -114,6 +114,31 @@ struct vmcb_save_area_cached {
 	u64 dr6;
 };
 
+struct vmcb_ctrl_area_cached {
+	u32 intercepts[MAX_INTERCEPT];
+	u16 pause_filter_thresh;
+	u16 pause_filter_count;
+	u64 iopm_base_pa;
+	u64 msrpm_base_pa;
+	u64 tsc_offset;
+	u32 asid;
+	u8 tlb_ctl;
+	u32 int_ctl;
+	u32 int_vector;
+	u32 int_state;
+	u32 exit_code;
+	u32 exit_code_hi;
+	u64 exit_info_1;
+	u64 exit_info_2;
+	u32 exit_int_info;
+	u32 exit_int_info_err;
+	u64 nested_ctl;
+	u32 event_inj;
+	u32 event_inj_err;
+	u64 nested_cr3;
+	u64 virt_ext;
+};
+
 struct svm_nested_state {
 	struct kvm_vmcb_info vmcb02;
 	u64 hsave_msr;
@@ -129,7 +154,7 @@ struct svm_nested_state {
 	bool nested_run_pending;
 
 	/* cache for control fields of the guest */
-	struct vmcb_control_area ctl;
+	struct vmcb_ctrl_area_cached ctl;
 
 	/*
 	 * Note: this struct is not kept up-to-date while L2 runs; it is only
@@ -318,6 +343,12 @@ static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
 	return test_bit(bit, (unsigned long *)&control->intercepts);
 }
 
+static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
+{
+	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
+	return test_bit(bit, (unsigned long *)&control->intercepts);
+}
+
 static inline void set_dr_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -470,17 +501,17 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
 {
-	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
+	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
 }
 
 static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
 {
-	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
+	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
 }
 
 static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 {
-	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
+	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu,