Commit 5addc235 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Cache vmcs.EXIT_QUALIFICATION using arch avail_reg flags

Introduce a new "extended register" type, EXIT_INFO_1 (to pair with the
nomenclature in .get_exit_info()), and use it to cache VMX's
vmcs.EXIT_QUALIFICATION.
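
The mechanism is the same availability-bitmap pattern KVM already uses for lazily-read state such as the PDPTRs: a per-vCPU bitmap gates a one-time VMREAD per exit, and the bitmap is cleared on every VM-exit. Below is a minimal standalone model of that pattern, not kernel code; the demo_* names and fake_vmcs_readl() are illustrative stand-ins for kvm_register_is_available()/kvm_register_mark_available() and vmcs_readl(EXIT_QUALIFICATION):

/* Minimal userspace model of the caching pattern (illustrative only). */
#include <stdio.h>

enum demo_exreg { DEMO_EXREG_EXIT_INFO_1 = 0 };

struct demo_vcpu {
	unsigned long regs_avail;         /* bit n set => cache entry n is valid */
	unsigned long exit_qualification; /* cached copy of the VMCS field */
};

static unsigned long vmcs_reads;          /* counts simulated VMREADs */

/* Stands in for vmcs_readl(EXIT_QUALIFICATION); a real VMREAD is the
 * hot-path cost this series avoids repeating within one exit. */
static unsigned long fake_vmcs_readl(void)
{
	vmcs_reads++;
	return 0xdead0001;
}

static unsigned long demo_get_exit_qual(struct demo_vcpu *vcpu)
{
	if (!(vcpu->regs_avail & (1ul << DEMO_EXREG_EXIT_INFO_1))) {
		vcpu->regs_avail |= 1ul << DEMO_EXREG_EXIT_INFO_1;
		vcpu->exit_qualification = fake_vmcs_readl();
	}
	return vcpu->exit_qualification;
}

/* Mirrors vmx_register_cache_reset(): run on every VM-exit so a stale
 * value is never served for a new exit. */
static void demo_cache_reset(struct demo_vcpu *vcpu)
{
	vcpu->regs_avail &= ~(1ul << DEMO_EXREG_EXIT_INFO_1);
}

int main(void)
{
	struct demo_vcpu vcpu = { 0 };

	demo_cache_reset(&vcpu);                /* "VM-exit" #1 */
	demo_get_exit_qual(&vcpu);
	demo_get_exit_qual(&vcpu);              /* second call hits the cache */
	printf("VMREADs after two lookups: %lu\n", vmcs_reads);  /* prints 1 */

	demo_cache_reset(&vcpu);                /* "VM-exit" #2 invalidates */
	demo_get_exit_qual(&vcpu);
	printf("VMREADs after reset+lookup: %lu\n", vmcs_reads); /* prints 2 */
	return 0;
}
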
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415203454.8296-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ec0241f3
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -170,6 +170,7 @@ enum kvm_reg {
 	VCPU_EXREG_CR3,
 	VCPU_EXREG_RFLAGS,
 	VCPU_EXREG_SEGMENTS,
+	VCPU_EXREG_EXIT_INFO_1,
 };
 
 enum {
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4604,7 +4604,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 	gva_t gva;
 	struct x86_exception e;
 
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 				vmcs_read32(VMX_INSTRUCTION_INFO), false,
 				sizeof(*vmpointer), &gva))
 		return 1;
@@ -4869,7 +4869,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 {
 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
 						    : get_vmcs12(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
@@ -4955,7 +4955,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 {
 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
 						    : get_vmcs12(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
@@ -5140,7 +5140,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 /* Emulate the VMPTRST instruction */
 static int handle_vmptrst(struct kvm_vcpu *vcpu)
 {
-	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
 	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
 	struct x86_exception e;
@@ -5208,7 +5208,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	/* According to the Intel VMX instruction reference, the memory
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
@@ -5290,7 +5290,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	/* according to the intel vmx instruction reference, the memory
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
@@ -5420,7 +5420,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 fail:
 	nested_vmx_vmexit(vcpu, vmx->exit_reason,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
-			  vmcs_readl(EXIT_QUALIFICATION));
+			  vmx_get_exit_qual(vcpu));
 	return 1;
 }
 
@@ -5471,7 +5471,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
 		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -5525,7 +5525,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 	struct vmcs12 *vmcs12)
 {
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 	int cr = exit_qualification & 15;
 	int reg;
 	unsigned long val;
@@ -5849,7 +5849,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 	}
 
 	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qual = vmx_get_exit_qual(vcpu);
 
 	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, exit_qual,
 				vmx->idt_vectoring_info, exit_intr_info,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4698,7 +4698,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 	}
 
 	if (is_page_fault(intr_info)) {
-		cr2 = vmcs_readl(EXIT_QUALIFICATION);
+		cr2 = vmx_get_exit_qual(vcpu);
 		/* EPT won't cause page fault directly */
 		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
 		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
@@ -4714,7 +4714,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
 		return 1;
 	case DB_VECTOR:
-		dr6 = vmcs_readl(EXIT_QUALIFICATION);
+		dr6 = vmx_get_exit_qual(vcpu);
 		if (!(vcpu->guest_debug &
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
@@ -4769,7 +4769,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
 	int size, in, string;
 	unsigned port;
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 	string = (exit_qualification & 16) != 0;
 
 	++vcpu->stat.io_exits;
@@ -4860,7 +4860,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	int err;
 	int ret;
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 	cr = exit_qualification & 15;
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
@@ -4937,7 +4937,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 	unsigned long exit_qualification;
 	int dr, dr7, reg;
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
 
 	/* First, if DR does not exist, trigger UD */
@@ -5050,7 +5050,7 @@ static int handle_invd(struct kvm_vcpu *vcpu)
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 
 	kvm_mmu_invlpg(vcpu, exit_qualification);
 	return kvm_skip_emulated_instruction(vcpu);
@@ -5082,7 +5082,7 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
 	if (likely(fasteoi)) {
-		unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 		int access_type, offset;
 
 		access_type = exit_qualification & APIC_ACCESS_TYPE;
@@ -5103,7 +5103,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
 {
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 	int vector = exit_qualification & 0xff;
 
 	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
@@ -5113,7 +5113,7 @@ static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
 
 static int handle_apic_write(struct kvm_vcpu *vcpu)
 {
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
 	u32 offset = exit_qualification & 0xfff;
 
 	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
@@ -5134,7 +5134,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 
 	reason = (u32)exit_qualification >> 30;
 	if (reason == TASK_SWITCH_GATE && idt_v) {
@@ -5184,7 +5184,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	gpa_t gpa;
 	u64 error_code;
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 
 	/*
 	 * EPT violation happened while executing iret from NMI,
@@ -5444,7 +5444,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 	/* According to the Intel instruction reference, the memory operand
 	 * is read even if it isn't needed (e.g., for type==all)
 	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 				vmx_instruction_info, false,
 				sizeof(operand), &gva))
 		return 1;
@@ -5520,7 +5520,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
 
 	trace_kvm_pml_full(vcpu->vcpu_id);
 
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
 
 	/*
 	 * PML buffer FULL happened while executing iret from NMI,
@@ -5634,7 +5634,7 @@ static const int kvm_vmx_max_exit_handlers =
 
 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 {
-	*info1 = vmcs_readl(EXIT_QUALIFICATION);
+	*info1 = vmx_get_exit_qual(vcpu);
 	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -210,6 +210,7 @@ struct vcpu_vmx {
 	 */
 	bool guest_state_loaded;
 
+	unsigned long exit_qualification;
 	u32 exit_intr_info;
 	u32 idt_vectoring_info;
 	ulong rflags;
@@ -449,7 +450,8 @@ static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_RFLAGS)
 				  | (1 << VCPU_EXREG_PDPTR)
 				  | (1 << VCPU_EXREG_SEGMENTS)
-				  | (1 << VCPU_EXREG_CR3));
+				  | (1 << VCPU_EXREG_CR3)
+				  | (1 << VCPU_EXREG_EXIT_INFO_1));
 	vcpu->arch.regs_dirty = 0;
 }
 
@@ -493,6 +495,17 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
 	return &(to_vmx(vcpu)->pi_desc);
 }
 
+static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
+		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	}
+	return vmx->exit_qualification;
+}
+
 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
 void free_vmcs(struct vmcs *vmcs);
 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
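
For illustration only, a hedged sketch of how a handler benefits from the accessor, assuming it lives in vmx.c where both vmx_get_exit_qual() and vmx_get_exit_info() are visible; demo_handle_exit() is hypothetical and not part of this commit:

/* Hypothetical handler: both lookups below cost a single VMREAD,
 * because the second call finds VCPU_EXREG_EXIT_INFO_1 already marked
 * available. */
static int demo_handle_exit(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmx_get_exit_qual(vcpu); /* VMREAD here */
	u64 info1, info2;

	vmx_get_exit_info(vcpu, &info1, &info2); /* info1 served from cache */
	return exit_qual == (unsigned long)info1; /* same exit, same value */
}

The cache stays correct because vmx_register_cache_reset() clears the VCPU_EXREG_EXIT_INFO_1 bit on every VM-exit, so the first vmx_get_exit_qual() call for a new exit always re-reads the VMCS.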