Commit 41e68b69 authored by Paolo Bonzini

KVM: vmx, svm: clean up mass updates to regs_avail/regs_dirty bits

Document the meaning of the three valid combinations of regs_avail and
regs_dirty.  Update regs_dirty just after writeback instead of doing it
later, after vmexit; after vmexit, instead, clear only the regs_avail
bits that correspond to lazily-loaded registers.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c62c7bd4
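
Condensed from the hunks below, the resulting flow around the vendor vcpu_run
paths looks roughly like this (a sketch that abbreviates the surrounding code
and shows only RIP for the writeback step; VMX_REGS_LAZY_LOAD_SET stands in
for the per-vendor *_REGS_LAZY_LOAD_SET masks introduced by this commit):

	/* Before VM entry: flush dirty cached registers back to the VMCS/VMCB. */
	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
	vcpu->arch.regs_dirty = 0;	/* avail=1, dirty=0: cache matches hardware */

	/* ... VM entry, guest runs, VM exit ... */

	/*
	 * After VM exit only the lazily-loaded registers are stale, so drop
	 * just their avail bits; everything else stays cached in vcpu->arch.
	 */
	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;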
@@ -43,6 +43,13 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
 BUILD_KVM_GPR_ACCESSORS(r15, R15)
 #endif
 
+/*
+ * avail  dirty
+ * 0	  0	  register in VMCS/VMCB
+ * 0	  1	  *INVALID*
+ * 1	  0	  register in vcpu->arch
+ * 1	  1	  register in vcpu->arch, needs to be stored back
+ */
 static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
 {
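
For context, the mark_* helpers elsewhere in this header (not part of this
diff, reproduced approximately from kvm_cache_regs.h) are what keep the
avail=0/dirty=1 row impossible: marking a register dirty always marks it
available as well.

	static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
						       enum kvm_reg reg)
	{
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	}

	static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
						   enum kvm_reg reg)
	{
		/* Dirty implies available, so the 0/1 combination never occurs. */
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	}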
@@ -3946,6 +3946,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 	}
+	vcpu->arch.regs_dirty = 0;
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(vcpu);
@@ -3980,7 +3981,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.apf.host_apf_flags =
 			kvm_read_and_reset_apf_flags();
 
-	kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
+	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
 
 	/*
 	 * We need to handle MC intercepts here before the vcpu has a chance to
@@ -326,6 +326,16 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
+/*
+ * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
+ * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
+ *
+ * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
+ * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
+ * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
+ */
+#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
+
 static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
 {
 	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
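
The on-demand path that this mask feeds looks roughly like the sketch below
(not the verbatim helper; the reader in kvm_cache_regs.h dispatches through
the vendor cache_reg hook, shown here as a static_call as used elsewhere in
the tree):

	/* Rough sketch of a lazy PDPTR access. */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
	/* The PDPTRs can now be read from the cached MMU state in vcpu->arch. */

On SVM the cache_reg callback (svm_cache_reg) reloads the PDPTRs and marks
VCPU_EXREG_PDPTR available again, which is why clearing SVM_REGS_LAZY_LOAD_SET
after vmexit is all the fast path needs to do.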
@@ -269,7 +269,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vmx_sync_vmcs_host_state(vmx, prev);
 	put_cpu();
 
-	vmx_register_cache_reset(vcpu);
+	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
+
+	/*
+	 * All lazily updated registers will be reloaded from VMCS12 on both
+	 * vmentry and vmexit.
+	 */
+	vcpu->arch.regs_dirty = 0;
 }
 
 /*
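
Dropping the avail bits of the whole lazy set is enough here because each of
those registers is refilled on first use: the next access goes through the
vendor cache_reg callback and re-reads the now-current VMCS, roughly along
these lines (abridged sketch, not the full function):

	static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
	{
		kvm_register_mark_available(vcpu, reg);

		switch (reg) {
		case VCPU_REGS_RSP:
			vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
			break;
		case VCPU_REGS_RIP:
			vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
			break;
		/* PDPTRs, CR0, CR3 and CR4 are handled similarly. */
		default:
			break;
		}
	}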
@@ -6649,6 +6649,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
 	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+	vcpu->arch.regs_dirty = 0;
 
 	cr3 = __get_current_cr3_fast();
 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
@@ -6743,7 +6744,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	loadsegment(es, __USER_DS);
 #endif
 
-	vmx_register_cache_reset(vcpu);
+	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
 
 	pt_guest_exit(vmx);
@@ -473,19 +473,21 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
 BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
 BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
 
-static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_RFLAGS)
-				  | (1 << VCPU_EXREG_PDPTR)
-				  | (1 << VCPU_EXREG_SEGMENTS)
-				  | (1 << VCPU_EXREG_CR0)
-				  | (1 << VCPU_EXREG_CR3)
-				  | (1 << VCPU_EXREG_CR4)
-				  | (1 << VCPU_EXREG_EXIT_INFO_1)
-				  | (1 << VCPU_EXREG_EXIT_INFO_2));
-	vcpu->arch.regs_dirty = 0;
-}
+/*
+ * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
+ * cache on demand.  Other registers not listed here are synced to
+ * the cache immediately after VM-Exit.
+ */
+#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |         \
+				 (1 << VCPU_REGS_RSP) |          \
+				 (1 << VCPU_EXREG_RFLAGS) |      \
+				 (1 << VCPU_EXREG_PDPTR) |       \
+				 (1 << VCPU_EXREG_SEGMENTS) |    \
+				 (1 << VCPU_EXREG_CR0) |         \
+				 (1 << VCPU_EXREG_CR3) |         \
+				 (1 << VCPU_EXREG_CR4) |         \
+				 (1 << VCPU_EXREG_EXIT_INFO_1) | \
+				 (1 << VCPU_EXREG_EXIT_INFO_2))
 
 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
 {
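
The exit-information registers earn their place in this set because they are
read back lazily too; the accessor later in this header looks approximately
like the sketch below (reproduced from memory, not part of this diff):

	static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
			kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
			vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
		}
		return vmx->exit_qualification;
	}

Only a VM-exit that actually consults the exit qualification pays for the
VMREAD; all other exits leave the field untouched in the VMCS.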