Commit 35754c98 authored by Paolo Bonzini

KVM: x86: introduce lapic_in_kernel

Avoid pointer chasing and memory barriers, and simplify the code
when split irqchip (LAPIC in kernel, IOAPIC/PIC in userspace)
is introduced.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d50ab6c1
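
The gain is easiest to see by putting the two predicates side by side. The lapic_in_kernel() body below is taken verbatim from the irq.h hunk in this commit; the irqchip_in_kernel() body is a reconstruction from the surrounding context (only its final return statement is visible in the hunk), so treat the pic_irqchip() chase and the barrier placement as an illustration rather than a quote:

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	struct kvm_pic *vpic = pic_irqchip(kvm);	/* chases kvm->arch.vpic */

	/* reconstructed: order the vpic read before dependent state */
	smp_rmb();
	return vpic != NULL;
}

static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
{
	/* a single load from the vcpu itself; no barrier required */
	return vcpu->arch.apic != NULL;
}

The two checks agree as long as vcpu->arch.apic is allocated exactly when the VM has an in-kernel irqchip, which is the invariant the kvm_vcpu_compatible() hunk below asserts; once split irqchip lands, the per-vCPU form stays correct for VMs where only the LAPIC lives in the kernel.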
@@ -57,7 +57,7 @@ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
  */
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 {
-	if (!irqchip_in_kernel(v->kvm))
+	if (!lapic_in_kernel(v))
 		return v->arch.interrupt.pending;

 	if (kvm_cpu_has_extint(v))
@@ -75,7 +75,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
  */
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-	if (!irqchip_in_kernel(v->kvm))
+	if (!lapic_in_kernel(v))
 		return v->arch.interrupt.pending;

 	if (kvm_cpu_has_extint(v))
@@ -103,7 +103,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 {
 	int vector;

-	if (!irqchip_in_kernel(v->kvm))
+	if (!lapic_in_kernel(v))
 		return v->arch.interrupt.nr;

 	vector = kvm_cpu_get_extint(v);
...
@@ -92,6 +92,14 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
 	return vpic != NULL;
 }

+static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
+{
+	/* Same as irqchip_in_kernel(vcpu->kvm), but with less
+	 * pointer chasing and no unnecessary memory barriers.
+	 */
+	return vcpu->arch.apic != NULL;
+}
+
 void kvm_pic_reset(struct kvm_kpic_state *s);

 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
...
@@ -1985,7 +1985,7 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 reg = (msr - APIC_BASE_MSR) << 4;

-	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
 		return 1;

 	if (reg == APIC_ICR2)
@@ -2002,7 +2002,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

-	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
+	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
 		return 1;

 	if (reg == APIC_DFR || reg == APIC_ICR2) {
...
@@ -3427,7 +3427,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)

 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
...
@@ -3060,7 +3060,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
 	r = cr_interception(svm);
-	if (irqchip_in_kernel(svm->vcpu.kvm))
+	if (lapic_in_kernel(&svm->vcpu))
 		return r;
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 		return r;
@@ -3305,7 +3305,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
 	 */
-	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
+	if (!lapic_in_kernel(&svm->vcpu) &&
 	    kvm_run->request_interrupt_window &&
 	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
...
@@ -809,7 +809,6 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
-static int vmx_vm_has_apicv(struct kvm *kvm);
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -947,9 +946,9 @@ static inline bool cpu_has_vmx_tpr_shadow(void)
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }

-static inline bool vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
 {
-	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
+	return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
 }

 static inline bool cpu_has_secondary_exec_ctrls(void)
@@ -1063,9 +1062,9 @@ static inline bool cpu_has_vmx_ple(void)
 		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }

-static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 {
-	return flexpriority_enabled && irqchip_in_kernel(kvm);
+	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }

 static inline bool cpu_has_vmx_vpid(void)
@@ -2378,7 +2377,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_pinbased_ctls_high |=
 		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
 		PIN_BASED_VMX_PREEMPTION_TIMER;
-	if (vmx_vm_has_apicv(vmx->vcpu.kvm))
+	if (vmx_cpu_uses_apicv(&vmx->vcpu))
 		vmx->nested.nested_vmx_pinbased_ctls_high |=
 			PIN_BASED_POSTED_INTR;
@@ -4333,14 +4332,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
 			msr, MSR_TYPE_W);
 }

-static int vmx_vm_has_apicv(struct kvm *kvm)
-{
-	return enable_apicv && irqchip_in_kernel(kvm);
-}
-
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
 {
-	return vmx_vm_has_apicv(vcpu->kvm);
+	return enable_apicv && lapic_in_kernel(vcpu);
 }

 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4520,7 +4514,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 {
 	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

-	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+	if (!vmx_cpu_uses_apicv(&vmx->vcpu))
 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
 	return pin_based_exec_ctrl;
 }
@@ -4532,7 +4526,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
 		exec_control &= ~CPU_BASED_MOV_DR_EXITING;

-	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
 		exec_control &= ~CPU_BASED_TPR_SHADOW;
 #ifdef CONFIG_X86_64
 		exec_control |= CPU_BASED_CR8_STORE_EXITING |
@@ -4549,7 +4543,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
-	if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+	if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
 		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 	if (vmx->vpid == 0)
 		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
@@ -4563,7 +4557,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 	if (!ple_gap)
 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+	if (!vmx_cpu_uses_apicv(&vmx->vcpu))
 		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4624,7 +4618,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				vmx_secondary_exec_control(vmx));
 	}

-	if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
+	if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4768,7 +4762,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (cpu_has_vmx_tpr_shadow() && !init_event) {
 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
-		if (vm_need_tpr_shadow(vcpu->kvm))
+		if (cpu_need_tpr_shadow(vcpu))
 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
 				     __pa(vcpu->arch.apic->regs));
 		vmcs_write32(TPR_THRESHOLD, 0);
@@ -4776,7 +4770,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

-	if (vmx_vm_has_apicv(vcpu->kvm))
+	if (vmx_cpu_uses_apicv(vcpu))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));

 	if (vmx->vpid != 0)
@@ -5316,7 +5310,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 				u8 cr8 = (u8)val;
 				err = kvm_set_cr8(vcpu, cr8);
 				kvm_complete_insn_gp(vcpu, err);
-				if (irqchip_in_kernel(vcpu->kvm))
+				if (lapic_in_kernel(vcpu))
 					return 1;
 				if (cr8_prev <= cr8)
 					return 1;
@@ -5535,7 +5529,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
 	 */
-	if (!irqchip_in_kernel(vcpu->kvm) &&
+	if (!lapic_in_kernel(vcpu) &&
 	    vcpu->run->request_interrupt_window &&
 	    !kvm_cpu_has_interrupt(vcpu)) {
 		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
@@ -7944,10 +7938,10 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	 * apicv
 	 */
 	if (!cpu_has_vmx_virtualize_x2apic_mode() ||
-	    !vmx_vm_has_apicv(vcpu->kvm))
+	    !vmx_cpu_uses_apicv(vcpu))
 		return;

-	if (!vm_need_tpr_shadow(vcpu->kvm))
+	if (!cpu_need_tpr_shadow(vcpu))
 		return;

 	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -8052,7 +8046,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
 	u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
-	if (!vmx_vm_has_apicv(vcpu->kvm))
+	if (!vmx_cpu_uses_apicv(vcpu))
 		return;

 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
@@ -8551,7 +8545,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	put_cpu();
 	if (err)
 		goto free_vmcs;
-	if (vm_need_virtualize_apic_accesses(kvm)) {
+	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
 		err = alloc_apic_access_page(kvm);
 		if (err)
 			goto free_vmcs;
@@ -9344,7 +9338,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			vmcs_write64(APIC_ACCESS_ADDR,
 				page_to_phys(vmx->nested.apic_access_page));
 		} else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
-			   (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
+			   cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
 			exec_control |=
 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 			kvm_vcpu_reload_apic_access_page(vcpu);
...
@@ -788,7 +788,7 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS)
 		return 1;
-	if (irqchip_in_kernel(vcpu->kvm))
+	if (lapic_in_kernel(vcpu))
 		kvm_lapic_set_tpr(vcpu, cr8);
 	else
 		vcpu->arch.cr8 = cr8;
@@ -798,7 +798,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr8);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
-	if (irqchip_in_kernel(vcpu->kvm))
+	if (lapic_in_kernel(vcpu))
 		return kvm_lapic_get_cr8(vcpu);
 	else
 		return vcpu->arch.cr8;
@@ -3175,7 +3175,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_vapic_addr va;

 		r = -EINVAL;
-		if (!irqchip_in_kernel(vcpu->kvm))
+		if (!lapic_in_kernel(vcpu))
 			goto out;
 		r = -EFAULT;
 		if (copy_from_user(&va, argp, sizeof va))
@@ -5666,7 +5666,7 @@ void kvm_arch_exit(void)
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
-	if (irqchip_in_kernel(vcpu->kvm)) {
+	if (lapic_in_kernel(vcpu)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		return 1;
 	} else {
@@ -6162,7 +6162,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;

-	if (!irqchip_in_kernel(vcpu->kvm))
+	if (!lapic_in_kernel(vcpu))
 		return;

 	if (!kvm_x86_ops->set_apic_access_page_addr)
@@ -6200,7 +6200,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+	bool req_int_win = !lapic_in_kernel(vcpu) &&
 		vcpu->run->request_interrupt_window;
 	bool req_immediate_exit = false;
@@ -6597,7 +6597,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}

 	/* re-sync apic's tpr */
-	if (!irqchip_in_kernel(vcpu->kvm)) {
+	if (!lapic_in_kernel(vcpu)) {
 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
 			r = -EINVAL;
 			goto out;
@@ -7297,7 +7297,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+	return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
 }

 struct static_key kvm_no_apic_vcpu __read_mostly;
@@ -7391,7 +7391,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	kvm_mmu_destroy(vcpu);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	free_page((unsigned long)vcpu->arch.pio_data);
-	if (!irqchip_in_kernel(vcpu->kvm))
+	if (!lapic_in_kernel(vcpu))
 		static_key_slow_dec(&kvm_no_apic_vcpu);
 }
...