Commit bd65ba82 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Add vmx_setup_uret_msr() to handle lookup and swap

Add vmx_setup_uret_msr() to wrap the lookup and manipulation of the uret
MSRs array during setup_msrs().  In addition to consolidating code, this
eliminates move_msr_up(), whose name, while a very literal description of
the function, isn't exactly helpful in understanding the net effect of
the code.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-12-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 86e3e494
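
For illustration only (not part of the commit): the helper introduced below boils down to a find-then-swap-to-front pattern over the guest uret-MSR array. A minimal userspace C sketch of that pattern, using hypothetical stand-in types (uret_msr, vmx_like) and a simplified linear scan in place of __vmx_find_uret_msr(), might look like this:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's vmx_uret_msr bookkeeping. */
    struct uret_msr {
            unsigned int msr;       /* MSR index */
            unsigned long data;     /* guest value */
    };

    struct vmx_like {
            struct uret_msr msrs[4];
            int nr_active;  /* msrs[0..nr_active) are loaded for the guest */
    };

    /* Simplified linear scan, standing in for __vmx_find_uret_msr(). */
    static int find_uret_msr(struct vmx_like *v, unsigned int msr)
    {
            for (int i = 0; i < 4; i++)
                    if (v->msrs[i].msr == msr)
                            return i;
            return -1;
    }

    /*
     * Consolidated lookup-and-swap, mirroring the shape of the new
     * vmx_setup_uret_msr(): find the MSR and, if present, swap it into
     * the next active slot at the front of the array.
     */
    static void setup_uret_msr(struct vmx_like *v, unsigned int msr)
    {
            int from = find_uret_msr(v, msr);

            if (from < 0)
                    return;

            int to = v->nr_active++;
            struct uret_msr tmp = v->msrs[to];

            v->msrs[to] = v->msrs[from];
            v->msrs[from] = tmp;
    }

    int main(void)
    {
            struct vmx_like v = {
                    .msrs = { { 0xc0000081 }, { 0xc0000082 },
                              { 0xc0000084 }, { 0xc0000103 } },
            };

            /* Activate only the MSRs this (pretend) guest needs. */
            setup_uret_msr(&v, 0xc0000103); /* TSC_AUX */
            setup_uret_msr(&v, 0xc0000081); /* STAR */

            for (int i = 0; i < v.nr_active; i++)
                    printf("active[%d] = %#x\n", i, v.msrs[i].msr);
            return 0;
    }

The point of the refactor is visible here: each call site shrinks from a find/check/swap triplet to a single call, and the active count is maintained inside the helper rather than threaded through every caller.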
@@ -1614,12 +1614,15 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
 		vmx_clear_hlt(vcpu);
 }
 
-/*
- * Swap MSR entry in host/guest MSR entry array.
- */
-static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
 {
 	struct vmx_uret_msr tmp;
+	int from, to;
+
+	from = __vmx_find_uret_msr(vmx, msr);
+	if (from < 0)
+		return;
+
+	to = vmx->nr_active_uret_msrs++;
 
 	tmp = vmx->guest_uret_msrs[to];
 	vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
@@ -1633,42 +1636,26 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	int nr_active_uret_msrs, index;
+	vmx->guest_uret_msrs_loaded = false;
+	vmx->nr_active_uret_msrs = 0;
 
-	nr_active_uret_msrs = 0;
 #ifdef CONFIG_X86_64
 	/*
 	 * The SYSCALL MSRs are only needed on long mode guests, and only
 	 * when EFER.SCE is set.
 	 */
 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
-		index = __vmx_find_uret_msr(vmx, MSR_STAR);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-		index = __vmx_find_uret_msr(vmx, MSR_LSTAR);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-		index = __vmx_find_uret_msr(vmx, MSR_SYSCALL_MASK);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
+		vmx_setup_uret_msr(vmx, MSR_STAR);
+		vmx_setup_uret_msr(vmx, MSR_LSTAR);
+		vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
 	}
 #endif
-	if (update_transition_efer(vmx)) {
-		index = __vmx_find_uret_msr(vmx, MSR_EFER);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-	}
-	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
-		index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-	}
-	index = __vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
-	if (index >= 0)
-		move_msr_up(vmx, index, nr_active_uret_msrs++);
+	if (update_transition_efer(vmx))
+		vmx_setup_uret_msr(vmx, MSR_EFER);
 
-	vmx->nr_active_uret_msrs = nr_active_uret_msrs;
-	vmx->guest_uret_msrs_loaded = false;
+	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
+		vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+
+	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);