Commit 2183f564 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Shadow VMCS primary execution controls

Prepare to shadow all major control fields on a per-VMCS basis, which
allows KVM to avoid VMREADs when switching between vmcs01 and vmcs02,
and more importantly can eliminate costly VMWRITEs to controls when
preparing vmcs02.

Shadowing exec controls also saves a VMREAD when opening virtual
INTR/NMI windows, yay...
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c5f2c766
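Every exec_controls_*() helper used in the diff below comes from the BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL) instantiation added at the end of this patch. The macro body itself sits outside the visible hunks, so what follows is only a minimal sketch of the pattern it generates, not the verbatim kernel macro; in particular, where the vmx_controls_shadow struct is embedded is not shown in these hunks, so the vmx->controls_shadow placement below is an assumption:

/*
 * Sketch of the BUILD_CONTROLS_SHADOW pattern (not the verbatim kernel
 * macro). Assumes the vmx_controls_shadow struct from this patch is
 * reachable as vmx->controls_shadow.
 */
#define BUILD_CONTROLS_SHADOW(lname, uname)				\
static inline void lname##_controls_init(struct vcpu_vmx *vmx, u32 val) \
{									\
	vmcs_write32(uname, val);	/* one VMWRITE seeds the cache */ \
	vmx->controls_shadow.lname = val;				\
}									\
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		\
{									\
	return vmx->controls_shadow.lname;	/* no VMREAD */		\
}									\
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	\
{									\
	if (lname##_controls_get(vmx) != val) {				\
		vmcs_write32(uname, val); /* VMWRITE only on change */	\
		vmx->controls_shadow.lname = val;			\
	}								\
}									\
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{									\
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	\
}									\
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									\
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	\
}									\
static inline void lname##_controls_reset_shadow(struct vcpu_vmx *vmx)	\
{									\
	/* re-prime the cache with one VMREAD after a VMCS switch */	\
	vmx->controls_shadow.lname = vmcs_read32(uname);		\
}

Under this pattern a bit flip such as exec_controls_setbit() reads the cached value instead of issuing a VMREAD, and skips the VMWRITE entirely when the bit is already set; reset_shadow() is what vmx_switch_vmcs() calls in the first hunk to re-sync the cache when a different VMCS becomes current.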
@@ -286,6 +286,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vm_entry_controls_reset_shadow(vmx);
 	vm_exit_controls_reset_shadow(vmx);
 	pin_controls_reset_shadow(vmx);
+	exec_controls_reset_shadow(vmx);
 	vmx_segment_cache_clear(vmx);
 }
@@ -2052,7 +2053,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	 */
 	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
 	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+	exec_controls_init(vmx, exec_control);

 	/*
 	 * SECONDARY EXEC CONTROLS
@@ -2873,8 +2874,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		 * _not_ what the processor does but it's basically the
 		 * only possibility we have.
 		 */
-		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
-				CPU_BASED_TPR_SHADOW);
+		exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
 	} else {
 		/*
 		 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
@@ -2896,11 +2896,9 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		}
 	}
 	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
-		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
-			      CPU_BASED_USE_MSR_BITMAPS);
+		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 	else
-		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
-				CPU_BASED_USE_MSR_BITMAPS);
+		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 }

 /*
@@ -2953,7 +2951,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	u32 exit_reason = EXIT_REASON_INVALID_STATE;
 	u32 exit_qual;

-	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+	evaluate_pending_interrupts = exec_controls_get(vmx) &
 		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
 	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
 		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
...
@@ -2796,22 +2796,20 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 				       unsigned long cr0,
 				       struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
 		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
-			     (CPU_BASED_CR3_LOAD_EXITING |
-			      CPU_BASED_CR3_STORE_EXITING));
+		exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+					  CPU_BASED_CR3_STORE_EXITING);
 		vcpu->arch.cr0 = cr0;
 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	} else if (!is_paging(vcpu)) {
 		/* From nonpaging to paging */
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
-			     ~(CPU_BASED_CR3_LOAD_EXITING |
-			       CPU_BASED_CR3_STORE_EXITING));
+		exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
+					    CPU_BASED_CR3_STORE_EXITING);
 		vcpu->arch.cr0 = cr0;
 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	}
@@ -4045,7 +4043,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	pin_controls_init(vmx, vmx_pin_based_exec_ctrl(vmx));

 	vmx->hv_deadline_tsc = -1;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
+	exec_controls_init(vmx, vmx_exec_control(vmx));

 	if (cpu_has_secondary_exec_ctrls()) {
 		vmx_compute_secondary_exec_control(vmx);
@@ -4235,8 +4233,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
-		      CPU_BASED_VIRTUAL_INTR_PENDING);
+	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
 }

 static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -4247,8 +4244,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 		return;
 	}

-	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
-		      CPU_BASED_VIRTUAL_NMI_PENDING);
+	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
 }

 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
@@ -4795,8 +4791,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	}

 	if (vcpu->guest_debug == 0) {
-		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
-				CPU_BASED_MOV_DR_EXITING);
+		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);

 		/*
 		 * No more DR vmexits; force a reload of the debug registers
@@ -4840,7 +4835,7 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);

 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
-	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
+	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
 }

 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
@@ -4900,8 +4895,7 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 {
-	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
-			CPU_BASED_VIRTUAL_INTR_PENDING);
+	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);

 	kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -5155,8 +5149,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
 	WARN_ON_ONCE(!enable_vnmi);
-	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
-			CPU_BASED_VIRTUAL_NMI_PENDING);
+	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
 	++vcpu->stat.nmi_window_exits;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -5168,7 +5161,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	enum emulation_result err = EMULATE_DONE;
 	int ret = 1;
-	u32 cpu_exec_ctrl;
 	bool intr_window_requested;
 	unsigned count = 130;
@@ -5179,8 +5171,8 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 	 */
 	WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);

-	cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
+	intr_window_requested = exec_controls_get(vmx) &
+				CPU_BASED_VIRTUAL_INTR_PENDING;

 	while (vmx->emulation_required && count-- != 0) {
 		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
...
@@ -89,6 +89,7 @@ struct vmx_controls_shadow {
 	u32 vm_entry;
 	u32 vm_exit;
 	u32 pin;
+	u32 exec;
 };

 /*
@@ -425,6 +426,7 @@ static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
 BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
 BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
 BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
+BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)

 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 {
...