Commit 86e3e494 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Move uret MSR lookup into update_transition_efer()

Move checking for the existence of MSR_EFER in the uret MSR array into
update_transition_efer() so that the lookup and manipulation of the
array in setup_msrs() occur back-to-back.  This paves the way toward
adding a helper to wrap the lookup and manipulation.

To avoid unnecessary overhead, defer the lookup until the uret array
would actually be modified in update_transition_efer().  EFER obviously
exists on CPUs that support the dedicated VMCS fields for switching
EFER, and EFER must exist for the guest and host EFER.NX value to
diverge, i.e. there is no danger of attempting to read/write EFER when
it doesn't exist.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-11-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ef1d2ee1
...@@ -941,10 +941,11 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, ...@@ -941,10 +941,11 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
m->host.val[j].value = host_val; m->host.val[j].value = host_val;
} }
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) static bool update_transition_efer(struct vcpu_vmx *vmx)
{ {
u64 guest_efer = vmx->vcpu.arch.efer; u64 guest_efer = vmx->vcpu.arch.efer;
u64 ignore_bits = 0; u64 ignore_bits = 0;
int i;
/* Shadow paging assumes NX to be available. */ /* Shadow paging assumes NX to be available. */
if (!enable_ept) if (!enable_ept)
...@@ -976,17 +977,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) ...@@ -976,17 +977,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
else else
clear_atomic_switch_msr(vmx, MSR_EFER); clear_atomic_switch_msr(vmx, MSR_EFER);
return false; return false;
} else { }
clear_atomic_switch_msr(vmx, MSR_EFER);
guest_efer &= ~ignore_bits; i = __vmx_find_uret_msr(vmx, MSR_EFER);
guest_efer |= host_efer & ignore_bits; if (i < 0)
return false;
vmx->guest_uret_msrs[efer_offset].data = guest_efer; clear_atomic_switch_msr(vmx, MSR_EFER);
vmx->guest_uret_msrs[efer_offset].mask = ~ignore_bits;
return true; guest_efer &= ~ignore_bits;
} guest_efer |= host_efer & ignore_bits;
vmx->guest_uret_msrs[i].data = guest_efer;
vmx->guest_uret_msrs[i].mask = ~ignore_bits;
return true;
} }
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
...@@ -1648,9 +1653,11 @@ static void setup_msrs(struct vcpu_vmx *vmx) ...@@ -1648,9 +1653,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
move_msr_up(vmx, index, nr_active_uret_msrs++); move_msr_up(vmx, index, nr_active_uret_msrs++);
} }
#endif #endif
index = __vmx_find_uret_msr(vmx, MSR_EFER); if (update_transition_efer(vmx)) {
if (index >= 0 && update_transition_efer(vmx, index)) index = __vmx_find_uret_msr(vmx, MSR_EFER);
move_msr_up(vmx, index, nr_active_uret_msrs++); if (index >= 0)
move_msr_up(vmx, index, nr_active_uret_msrs++);
}
if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) { if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX); index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
if (index >= 0) if (index >= 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment