Commit 5228eb96 authored by Maxim Levitsky, committed by Paolo Bonzini

KVM: x86: nSVM: implement nested TSC scaling

This was tested by booting a nested guest with TSC=1GHz,
observing the clocks, and doing about 100 cycles of migration.

Note that a QEMU patch is needed to support migration, because
a new MSR has to be included in the migration state.

The patch will be sent to the QEMU mailing list soon.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210914154825.104886-14-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f800650a
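For reference, the arithmetic behind kvm_calc_nested_tsc_offset() and kvm_calc_nested_tsc_multiplier() (the common-code helpers this patch builds on): if L1 sees host_tsc * ratio01 + offset01, and L2 is defined relative to L1 by ratio12/offset12, then L2's effective view is host_tsc * (ratio01 * ratio12) + (offset01 * ratio12 + offset12), with ratios kept in AMD's 8.32 fixed-point format. Below is a minimal standalone C sketch of that math, not the kernel code itself; the names (mul_ratio, nested_tsc_offset, ...) are illustrative, and the 128-bit multiply stands in for the kernel's mul_u64_u64_shr()/mul_s64_u64_shr():

#include <stdint.h>

/* AMD's TSC_RATIO MSR encodes the scaling ratio as 8.32 fixed point:
 * bits 39:32 integer part, bits 31:0 fractional part. */
#define TSC_RATIO_FRAC_BITS 32
#define TSC_RATIO_DEFAULT   (1ULL << TSC_RATIO_FRAC_BITS)	/* 1.0 */

/* (a * b) >> 32 with a 128-bit intermediate, so the product of two
 * fixed-point ratios stays a fixed-point ratio. */
static uint64_t mul_ratio(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> TSC_RATIO_FRAC_BITS);
}

/* Effective L2 multiplier: L1's ratio scaled again by the nested ratio. */
static uint64_t nested_tsc_multiplier(uint64_t ratio01, uint64_t ratio12)
{
	return mul_ratio(ratio01, ratio12);
}

/* Effective L2 offset.  L1's offset is applied after L1 scaling but
 * before L2 scaling, so it must itself be scaled by ratio12:
 *   L2_TSC = (host_tsc * ratio01 + offset01) * ratio12 + offset12
 * TSC offsets are signed, hence the signed __int128 multiply. */
static int64_t nested_tsc_offset(int64_t offset01, int64_t offset12,
				 uint64_t ratio12)
{
	int64_t off = offset01;

	if (ratio12 != TSC_RATIO_DEFAULT)
		off = (int64_t)(((__int128)offset01 * (int64_t)ratio12)
				>> TSC_RATIO_FRAC_BITS);
	return off + offset12;
}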
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -538,8 +538,17 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 	if (nested_npt_enabled(svm))
 		nested_svm_init_mmu_context(vcpu);
 
-	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
-		vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+			vcpu->arch.l1_tsc_offset,
+			svm->nested.ctl.tsc_offset,
+			svm->tsc_ratio_msr);
+
+	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
+
+	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
+		WARN_ON(!svm->tsc_scaling_enabled);
+		nested_svm_update_tsc_ratio_msr(vcpu);
+	}
 
 	svm->vmcb->control.int_ctl =
 		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
@@ -824,6 +833,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 	}
 
+	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
+		WARN_ON(!svm->tsc_scaling_enabled);
+		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+	}
+
 	svm->nested.ctl.nested_cr3 = 0;
 
 	/*
@@ -1211,6 +1226,16 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
 	return NESTED_EXIT_CONTINUE;
 }
 
+void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	vcpu->arch.tsc_scaling_ratio =
+		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
+					       svm->tsc_ratio_msr);
+	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+}
+
 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
 				struct kvm_nested_state __user *user_kvm_nested_state,
 				u32 user_data_size)
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -940,6 +940,9 @@ static __init void svm_set_cpu_caps(void)
 		if (npt_enabled)
 			kvm_cpu_cap_set(X86_FEATURE_NPT);
 
+		if (tsc_scaling)
+			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
+
 		/* Nested VM can receive #VMEXIT instead of triggering #GP */
 		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
 	}
@@ -1132,7 +1135,9 @@ static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
 
 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
 {
-	return kvm_default_tsc_scaling_ratio;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm->tsc_ratio_msr;
 }
 
 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -1144,7 +1149,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 {
 	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
 }
@@ -1356,7 +1361,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 	vmcb_mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);
-
 }
 
 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -1367,6 +1371,7 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	svm_init_osvw(vcpu);
 
 	vcpu->arch.microcode_version = 0x01000065;
+	svm->tsc_ratio_msr = kvm_default_tsc_scaling_ratio;
 
 	if (sev_es_guest(vcpu->kvm))
 		sev_es_vcpu_reset(svm);
@@ -2718,6 +2723,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	switch (msr_info->index) {
+	case MSR_AMD64_TSC_RATIO:
+		if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
+			return 1;
+		msr_info->data = svm->tsc_ratio_msr;
+		break;
 	case MSR_STAR:
 		msr_info->data = svm->vmcb01.ptr->save.star;
 		break;
@@ -2867,6 +2877,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	u32 ecx = msr->index;
 	u64 data = msr->data;
 	switch (ecx) {
+	case MSR_AMD64_TSC_RATIO:
+		if (!msr->host_initiated && !svm->tsc_scaling_enabled)
+			return 1;
+
+		if (data & TSC_RATIO_RSVD)
+			return 1;
+
+		svm->tsc_ratio_msr = data;
+
+		if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
+			nested_svm_update_tsc_ratio_msr(vcpu);
+
+		break;
 	case MSR_IA32_CR_PAT:
 		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
 			return 1;
@@ -4062,6 +4085,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
 			     guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
+	svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
+
 	svm_recalc_instruction_intercepts(vcpu, svm);
 
 	/* For sev guests, the memory encryption bit is not reserved in CR3.  */
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -140,6 +140,8 @@ struct vcpu_svm {
 	u64 next_rip;
 
 	u64 spec_ctrl;
+
+	u64 tsc_ratio_msr;
 	/*
 	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
 	 * translated into the appropriate L2_CFG bits on the host to
@@ -161,6 +163,7 @@ struct vcpu_svm {
 
 	/* cached guest cpuid flags for faster access */
 	bool nrips_enabled	: 1;
+	bool tsc_scaling_enabled : 1;
 
 	u32 ldr_reg;
 	u32 dfr_reg;
@@ -483,6 +486,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
 int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 			       bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
+void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
 void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
 				     struct vmcb_control_area *control);
 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6435,6 +6435,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		return nested;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
+	case MSR_AMD64_TSC_RATIO:
 		/* This is AMD only.  */
 		return false;
 	default:
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1361,6 +1361,7 @@ static const u32 emulated_msrs_all[] = {
 	MSR_PLATFORM_INFO,
 	MSR_MISC_FEATURES_ENABLES,
 	MSR_AMD64_VIRT_SPEC_CTRL,
+	MSR_AMD64_TSC_RATIO,
 	MSR_IA32_POWER_CTL,
 	MSR_IA32_UCODE_REV,
 
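As the commit message notes, live migration only works once userspace carries the new MSR in its saved vCPU state. A hypothetical VMM-side sketch (QEMU-like; vcpu_fd, the one_msr wrapper and the omitted error handling are all illustrative) that saves and restores MSR_AMD64_TSC_RATIO through the standard KVM_GET_MSRS/KVM_SET_MSRS ioctls, i.e. the host_initiated path added to svm_get_msr()/svm_set_msr() above:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define MSR_AMD64_TSC_RATIO 0xc0000104

/* struct kvm_msrs ends in a flexible array of entries; wrap the
 * header plus a single entry for one-MSR accesses. */
struct one_msr {
	struct kvm_msrs hdr;
	struct kvm_msr_entry entry;
};

static __u64 get_tsc_ratio(int vcpu_fd)
{
	struct one_msr m;

	memset(&m, 0, sizeof(m));
	m.hdr.nmsrs = 1;
	m.entry.index = MSR_AMD64_TSC_RATIO;
	ioctl(vcpu_fd, KVM_GET_MSRS, &m);	/* save side */
	return m.entry.data;
}

static void set_tsc_ratio(int vcpu_fd, __u64 ratio)
{
	struct one_msr m;

	memset(&m, 0, sizeof(m));
	m.hdr.nmsrs = 1;
	m.entry.index = MSR_AMD64_TSC_RATIO;
	m.entry.data = ratio;
	ioctl(vcpu_fd, KVM_SET_MSRS, &m);	/* restore side */
}

Host-initiated accesses succeed even when the guest's CPUID lacks TSCRATEMSR, which is why the new svm_get_msr()/svm_set_msr() cases check msr->host_initiated before rejecting the access.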