Commit d5696725 authored by Avi Kivity

KVM: VMX: Fix comparison of guest efer with stale host value

update_transition_efer() masks out some efer bits when deciding whether
to switch the msr during guest entry; for example, NX is emulated using the
mmu so we don't need to disable it, and LMA/LME are handled by the hardware.

However, with shared msrs, the comparison is made against a stale value;
at the time of the guest switch we may be running with another guest's efer.

Fix by deferring the mask/compare to the actual point of guest entry.

Noted by Marcelo.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent f50146bd
...@@ -812,6 +812,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); ...@@ -812,6 +812,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v); int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_define_shared_msr(unsigned index, u32 msr); void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val); void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
#endif /* _ASM_X86_KVM_HOST_H */ #endif /* _ASM_X86_KVM_HOST_H */
...@@ -89,6 +89,7 @@ struct vmcs { ...@@ -89,6 +89,7 @@ struct vmcs {
struct shared_msr_entry { struct shared_msr_entry {
unsigned index; unsigned index;
u64 data; u64 data;
u64 mask;
}; };
struct vcpu_vmx { struct vcpu_vmx {
...@@ -601,12 +602,10 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) ...@@ -601,12 +602,10 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
if (guest_efer & EFER_LMA) if (guest_efer & EFER_LMA)
ignore_bits &= ~(u64)EFER_SCE; ignore_bits &= ~(u64)EFER_SCE;
#endif #endif
if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
return false;
guest_efer &= ~ignore_bits; guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits; guest_efer |= host_efer & ignore_bits;
vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
return true; return true;
} }
...@@ -657,7 +656,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) ...@@ -657,7 +656,8 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
#endif #endif
for (i = 0; i < vmx->save_nmsrs; ++i) for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index, kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx->guest_msrs[i].data); vmx->guest_msrs[i].data,
vmx->guest_msrs[i].mask);
} }
static void __vmx_load_host_state(struct vcpu_vmx *vmx) static void __vmx_load_host_state(struct vcpu_vmx *vmx)
...@@ -2394,6 +2394,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ...@@ -2394,6 +2394,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
data = data_low | ((u64)data_high << 32); data = data_low | ((u64)data_high << 32);
vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
++vmx->nmsrs; ++vmx->nmsrs;
} }
......
...@@ -185,11 +185,11 @@ static void kvm_shared_msr_cpu_online(void) ...@@ -185,11 +185,11 @@ static void kvm_shared_msr_cpu_online(void)
locals->current_value[i] = shared_msrs_global.msrs[i].value; locals->current_value[i] = shared_msrs_global.msrs[i].value;
} }
void kvm_set_shared_msr(unsigned slot, u64 value) void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{ {
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
if (value == smsr->current_value[slot]) if (((value ^ smsr->current_value[slot]) & mask) == 0)
return; return;
smsr->current_value[slot] = value; smsr->current_value[slot] = value;
wrmsrl(shared_msrs_global.msrs[slot].msr, value); wrmsrl(shared_msrs_global.msrs[slot].msr, value);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment