Commit e5d03de5 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Reset register cache (available and dirty masks) on VMCS switch

Reset the per-vCPU available and dirty register masks when switching
between vmcs01 and vmcs02, as the masks track state relative to the
current VMCS.  The stale masks don't cause problems in the current code
base because the registers are either unconditionally written on nested
transitions or, in the case of segment registers, have an additional
tracker that is manually reset.
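
For context, these masks gate the register cache's lazy-read path. A simplified paraphrase of the pattern in arch/x86/kvm/kvm_cache_regs.h (illustration only, not part of this commit; exact signatures vary across kernel versions):

	/*
	 * Illustrative paraphrase of the register-cache read path.  The
	 * avail bit means "vcpu->arch.regs[reg] is in sync with the
	 * current VMCS"; on a miss the value is pulled from hardware,
	 * e.g. via VMREAD on VMX.
	 */
	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
	{
		if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
			kvm_x86_ops.cache_reg(vcpu, reg);
		return vcpu->arch.regs[reg];
	}

A stale avail bit carried across a vmcs01<->vmcs02 switch would let this path skip the VMREAD and hand back a value cached from the other VMCS.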

Note, by dropping (previously implicitly, now explicitly) the dirty mask
when switching the active VMCS, KVM is technically losing writes to the
associated fields.  But the only regs that can be dirtied (RIP, RSP and
PDPTRs) are unconditionally written on nested transitions, i.e. explicit
writeback would be a waste of cycles, and a WARN_ON would be rather
pointless.
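
The write side shows why the dropped dirty bits are benign. A hedged sketch of the pattern (paraphrased from kvm_cache_regs.h; kvm_rip_write() and kvm_rsp_write() are thin wrappers around this):

	/*
	 * Illustrative paraphrase of the register-cache write path: a
	 * write marks the reg both available and dirty so it is flushed
	 * to hardware (e.g. VMWRITE) before the next VM-entry.
	 */
	static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
					      unsigned long val)
	{
		vcpu->arch.regs[reg] = val;
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	}

Because nested transitions unconditionally rewrite RIP, RSP and the PDPTRs (marking them dirty again), any dirty bits cleared by the reset are immediately re-established with values destined for the new VMCS.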
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415203454.8296-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9932b49e
arch/x86/kvm/vmx/nested.c
@@ -307,6 +307,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vmx_sync_vmcs_host_state(vmx, prev);
 	put_cpu();
 
+	vmx_register_cache_reset(vcpu);
 	vmx_segment_cache_clear(vmx);
 }
...
arch/x86/kvm/vmx/vmx.c
@@ -6711,12 +6711,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	loadsegment(es, __USER_DS);
 #endif
 
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_RFLAGS)
-				  | (1 << VCPU_EXREG_PDPTR)
-				  | (1 << VCPU_EXREG_SEGMENTS)
-				  | (1 << VCPU_EXREG_CR3));
-	vcpu->arch.regs_dirty = 0;
+	vmx_register_cache_reset(vcpu);
 
 	pt_guest_exit(vmx);
...
arch/x86/kvm/vmx/vmx.h
@@ -8,6 +8,7 @@
 #include <asm/intel_pt.h>
 
 #include "capabilities.h"
+#include "kvm_cache_regs.h"
 #include "ops.h"
 #include "vmcs.h"
...
@@ -447,6 +448,16 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 	vmx->segment_cache.bitmask = 0;
 }
 
+static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+				  | (1 << VCPU_EXREG_RFLAGS)
+				  | (1 << VCPU_EXREG_PDPTR)
+				  | (1 << VCPU_EXREG_SEGMENTS)
+				  | (1 << VCPU_EXREG_CR3));
+	vcpu->arch.regs_dirty = 0;
+}
+
 static inline u32 vmx_vmentry_ctrl(void)
 {
 	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
...