Commit ed0a0ec9 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "A somewhat bigger ARM update, and the usual smattering of x86 bug
  fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: vmx: Fix entry number check for add_atomic_switch_msr()
  KVM: x86: Recompute PID.ON when clearing PID.SN
  KVM: nVMX: Restore a preemption timer consistency check
  x86/kvm/nVMX: read from MSR_IA32_VMX_PROCBASED_CTLS2 only when it is available
  KVM: arm64: Forbid kprobing of the VHE world-switch code
  KVM: arm64: Relax the restriction on using stage2 PUD huge mapping
  arm: KVM: Add missing kvm_stage2_has_pmd() helper
  KVM: arm/arm64: vgic: Always initialize the group of private IRQs
  arm/arm64: KVM: Don't panic on failure to properly reset system registers
  arm/arm64: KVM: Allow a VCPU to fully reset itself
  KVM: arm/arm64: Reset the VCPU without preemption and vcpu state loaded
  arm64: KVM: Don't generate UNDEF when LORegion feature is present
  KVM: arm/arm64: vgic: Make vgic_cpu->ap_list_lock a raw_spinlock
  KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock
  KVM: arm/arm64: vgic: Make vgic_irq->irq_lock a raw_spinlock
parents 64c0133e 98ae70cc
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)

 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

@@ -147,6 +148,13 @@ struct kvm_cpu_context {

 typedef struct kvm_cpu_context kvm_cpu_context_t;

+struct vcpu_reset_state {
+	unsigned long	pc;
+	unsigned long	r0;
+	bool		be;
+	bool		reset;
+};
+
 struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

+	struct vcpu_reset_state reset_state;
+
	/* Detect first run of a vcpu */
	bool has_run_once;
 };
...
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 #define S2_PMD_MASK				PMD_MASK
 #define S2_PMD_SIZE				PMD_SIZE

+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+	return true;
+}
+
 #endif	/* __ARM_S2_PGTABLE_H_ */
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu_cp15(vcpu, num) == 0x42424242)
-			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
+		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <kvm/arm_arch_timer.h>

@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
	/* Reset CP15 registers */
	kvm_reset_coprocs(vcpu);

+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (target_pc & 1) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
	/* Reset arch_timer context */
	return kvm_timer_vcpu_reset(vcpu);
 }
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)

 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

@@ -208,6 +209,13 @@ struct kvm_cpu_context {

 typedef struct kvm_cpu_context kvm_cpu_context_t;

+struct vcpu_reset_state {
+	unsigned long	pc;
+	unsigned long	r0;
+	bool		be;
+	bool		reset;
+};
+
 struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

+	/* Additional reset state */
+	struct vcpu_reset_state	reset_state;
+
	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
	bool sysregs_loaded_on_cpu;
...
@@ -23,6 +23,7 @@
 #include <kvm/arm_psci.h>

 #include <asm/cpufeature.h>
+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>

@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
+NOKPROBE_SYMBOL(activate_traps_vhe);

 static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 {

@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
 }
+NOKPROBE_SYMBOL(deactivate_traps_vhe);

 static void __hyp_text __deactivate_traps_nvhe(void)
 {

@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)

	return exit_code;
 }
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
		      read_sysreg_el2(esr), read_sysreg_el2(far),
		      read_sysreg(hpfar_el2), par, vcpu);
 }
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
...
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>

+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>

@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
	__sysreg_save_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);

 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);

 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {

@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
	__sysreg_restore_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);

 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);

 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
...
@@ -32,6 +32,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>

 /* Maximum phys_shift supported for any VM on this host */

@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
+ *
+ * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
+ * ioctl or as part of handling a request issued by another VCPU in the PSCI
+ * handling code.  In the first case, the VCPU will not be loaded, and in the
+ * second case the VCPU will be loaded.  Because this function operates purely
+ * on the memory-backed values of system registers, we want to do a full put
+ * if we were loaded (handling a request) and load the values back at the end
+ * of the function.  Otherwise we leave the state alone.  In both cases, we
+ * disable preemption around the vcpu reset as we would otherwise race with
+ * preempt notifiers which also call put/load.
 */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
	const struct kvm_regs *cpu_reset;
+	int ret = -EINVAL;
+	bool loaded;
+
+	preempt_disable();
+	loaded = (vcpu->cpu != -1);
+	if (loaded)
+		kvm_arch_vcpu_put(vcpu);

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpu_has_32bit_el1())
-				return -EINVAL;
+				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;

@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (vcpu->arch.reset_state.reset) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
	/* Reset PMU */
	kvm_pmu_vcpu_reset(vcpu);

@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
-	return kvm_timer_vcpu_reset(vcpu);
+	ret = kvm_timer_vcpu_reset(vcpu);
+out:
+	if (loaded)
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+	preempt_enable();
+	return ret;
 }

 void kvm_set_ipa_limit(void)
...
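The comment added above kvm_reset_vcpu() describes a put/modify/load pattern: when the reset request comes from another VCPU via PSCI, the target's register state is still loaded on a physical CPU, so it is synced back to memory first, the memory-backed copy is rewritten, and the state is loaded again, all with preemption disabled. The following stand-alone sketch (toy names only, not the kernel API) illustrates that ordering under the stated assumption that reset may only ever touch the memory-backed copy:

/* Toy user-space model of the put/reset/load pattern in kvm_reset_vcpu(). */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	int  cpu;            /* -1 when not loaded on a physical CPU */
	long mem_backed_pc;  /* the copy that reset is allowed to touch */
	long hw_pc;          /* the copy currently "loaded" on the CPU */
};

static void toy_put(struct toy_vcpu *v)  { v->mem_backed_pc = v->hw_pc; }
static void toy_load(struct toy_vcpu *v) { v->hw_pc = v->mem_backed_pc; }

static void toy_reset(struct toy_vcpu *v, long new_pc)
{
	bool loaded = (v->cpu != -1);   /* same test as the patch */

	/* preempt_disable() would bracket this in the kernel */
	if (loaded)
		toy_put(v);             /* sync hardware state into memory */

	v->mem_backed_pc = new_pc;      /* operate on the memory copy only */

	if (loaded)
		toy_load(v);            /* push the reset state back onto the CPU */
	/* preempt_enable() */
}

int main(void)
{
	struct toy_vcpu v = { .cpu = 0, .hw_pc = 0x1000, .mem_backed_pc = 0 };

	toy_reset(&v, 0x8000);
	printf("pc after reset: %#lx\n", v.hw_pc);   /* prints 0x8000 */
	return 0;
}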
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
	return read_zero(vcpu, p);
 }

-static bool trap_undef(struct kvm_vcpu *vcpu,
-		       struct sys_reg_params *p,
-		       const struct sys_reg_desc *r)
+/*
+ * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
+ * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
+ * system, these registers should UNDEF. LORID_EL1 being a RO register, we
+ * treat it separately.
+ */
+static bool trap_loregion(struct kvm_vcpu *vcpu,
+			  struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
 {
-	kvm_inject_undefined(vcpu);
-	return false;
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
+			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	if (p->is_write && sr == SYS_LORID_EL1)
+		return write_to_read_only(vcpu, p, r);
+
+	return trap_raz_wi(vcpu, p, r);
 }

 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,

@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
		if (val & ptrauth_mask)
			kvm_debug("ptrauth unsupported for guests, suppressing\n");
		val &= ~ptrauth_mask;
-	} else if (id == SYS_ID_AA64MMFR1_EL1) {
-		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
-			kvm_debug("LORegions unsupported for guests, suppressing\n");
-		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
	}

	return val;

@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

-	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
-	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORID_EL1), trap_undef },
+	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

-	for (num = 1; num < NR_SYS_REGS; num++)
-		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
-			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
+	for (num = 1; num < NR_SYS_REGS; num++) {
+		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
+			break;
+	}
 }
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
	    (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

+	if (!nested_cpu_has_preemption_timer(vmcs12) &&
+	    nested_cpu_has_save_preemption_timer(vmcs12))
+		return -EINVAL;
+
	if (nested_cpu_has_ept(vmcs12) &&
	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
		return -EINVAL;

@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
-	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
-		msrs->secondary_ctls_low,
-		msrs->secondary_ctls_high);
+	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+		      msrs->secondary_ctls_low,
+		      msrs->secondary_ctls_high);
+
	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
...
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
	if (!entry_only)
		j = find_msr(&m->host, msr);

-	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+	if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+	    (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
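The shortlog entry "Fix entry number check for add_atomic_switch_msr()" refers to the hunk above: assuming find_msr() returns the index of an existing entry or a negative value when the MSR is not yet in the list (which is what the new condition implies), the old comparison against NR_AUTOLOAD_MSRS could never be true, so a full list could overflow silently. A stand-alone sketch of the corrected logic, with a toy list in place of the real vmx structures:

/* Toy model of the autoload-MSR list and the fixed capacity check.
 * find_msr() here mimics the assumed behaviour: index if present, -1 if not. */
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct msr_list { unsigned msrs[NR_AUTOLOAD_MSRS]; int nr; };

static int find_msr(const struct msr_list *m, unsigned msr)
{
	for (int i = 0; i < m->nr; i++)
		if (m->msrs[i] == msr)
			return i;
	return -1;                      /* never equals NR_AUTOLOAD_MSRS */
}

static int add_msr(struct msr_list *m, unsigned msr)
{
	int i = find_msr(m, msr);

	/* Old, broken check: (i == NR_AUTOLOAD_MSRS) -- unreachable.
	 * Fixed check: only a *new* entry can overflow a full list. */
	if (i < 0 && m->nr == NR_AUTOLOAD_MSRS) {
		fprintf(stderr, "Not enough msr switch entries\n");
		return -1;
	}
	if (i < 0)
		m->msrs[m->nr++] = msr; /* an existing entry is updated in place */
	return 0;
}

int main(void)
{
	struct msr_list m = { .nr = 0 };

	for (unsigned msr = 0; msr < 10; msr++)
		add_msr(&m, msr);       /* the 9th and 10th are rejected */
	printf("entries: %d\n", m.nr);  /* prints 8 */
	return 0;
}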
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
		return;

-	/*
-	 * First handle the simple case where no cmpxchg is necessary; just
-	 * allow posting non-urgent interrupts.
-	 *
-	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
-	 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
-	 * expects the VCPU to be on the blocked_vcpu_list that matches
-	 * PI.NDST.
-	 */
-	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
-	    vcpu->cpu == cpu) {
-		pi_clear_sn(pi_desc);
-		return;
-	}
-
	/* The full case. */
	do {
		old.control = new.control = pi_desc->control;

@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
		new.sn = 0;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);
+
+	/*
+	 * Clear SN before reading the bitmap.  The VT-d firmware
+	 * writes the bitmap and reads SN atomically (5.2.3 in the
+	 * spec), so it doesn't really have a memory barrier that
+	 * pairs with this, but we cannot do that and we need one.
+	 */
+	smp_mb__after_atomic();
+
+	if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+		pi_set_on(pi_desc);
 }

 /*
...
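The vmx_vcpu_pi_load() hunk above, together with the new pi_set_on() helper in the next file, implements the "Recompute PID.ON when clearing PID.SN" fix: once suppress-notification is cleared, any vector posted into PIR while notifications were suppressed must be reflected in ON, or it stays pending until some unrelated event sets ON. A simplified, stand-alone illustration of that recomputation (toy types, not the real posted-interrupt descriptor layout):

/* Toy posted-interrupt descriptor: a pending-interrupt bitmap plus the
 * ON (notification outstanding) and SN (suppress notification) bits. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_VECTORS 256

struct toy_pi_desc {
	unsigned long pir[NR_VECTORS / (8 * sizeof(unsigned long))];
	bool on;
	bool sn;
};

static bool pir_empty(const struct toy_pi_desc *pi)
{
	for (size_t i = 0; i < sizeof(pi->pir) / sizeof(pi->pir[0]); i++)
		if (pi->pir[i])
			return false;
	return true;
}

/* What the fix does on vcpu load: clear SN, then recompute ON from PIR.
 * (In the kernel this sits after the cmpxchg loop and a memory barrier.) */
static void toy_pi_unsuppress(struct toy_pi_desc *pi)
{
	pi->sn = false;
	if (!pir_empty(pi))
		pi->on = true;  /* don't lose vectors posted while SN was set */
}

int main(void)
{
	struct toy_pi_desc pi;

	memset(&pi, 0, sizeof(pi));
	pi.sn = true;
	pi.pir[0] |= 1UL << 32;         /* a vector posted while suppressed */

	toy_pi_unsuppress(&pi);
	printf("ON=%d\n", pi.on);       /* prints 1 */
	return 0;
}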
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
 }

-static inline void pi_clear_sn(struct pi_desc *pi_desc)
-{
-	return clear_bit(POSTED_INTR_SN,
-			(unsigned long *)&pi_desc->control);
-}
-
 static inline void pi_set_sn(struct pi_desc *pi_desc)
 {
	return set_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
 }

+static inline void pi_set_on(struct pi_desc *pi_desc)
+{
+	set_bit(POSTED_INTR_ON,
+		(unsigned long *)&pi_desc->control);
+}
+
 static inline void pi_clear_on(struct pi_desc *pi_desc)
...
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	 * 1) We should set ->mode before checking ->requests.  Please see
	 * the comment in kvm_vcpu_exiting_guest_mode().
	 *
-	 * 2) For APICv, we should set ->mode before checking PIR.ON. This
+	 * 2) For APICv, we should set ->mode before checking PID.ON. This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
...
@@ -100,7 +100,7 @@ enum vgic_irq_config {
 };

 struct vgic_irq {
-	spinlock_t irq_lock;		/* Protects the content of the struct */
+	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	struct list_head lpi_list;	/* Used to link all LPIs together */
	struct list_head ap_list;

@@ -256,7 +256,7 @@ struct vgic_dist {
	u64			propbaser;

	/* Protects the lpi_list and the count value below. */
-	spinlock_t		lpi_list_lock;
+	raw_spinlock_t		lpi_list_lock;
	struct list_head	lpi_list_head;
	int			lpi_list_count;

@@ -307,7 +307,7 @@ struct vgic_cpu {
	unsigned int used_lrs;
	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];

-	spinlock_t ap_list_lock;	/* Protects the ap_list */
+	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
...
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

+	/*
+	 * Make sure we will observe a potential reset request if we've
+	 * observed a change to the power state. Pairs with the smp_wmb() in
+	 * kvm_psci_vcpu_on().
+	 */
+	smp_rmb();
 }

 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)

@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
...
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	vma_pagesize = vma_kernel_pagesize(vma);
	/*
-	 * PUD level may not exist for a VM but PMD is guaranteed to
-	 * exist.
+	 * The stage2 has a minimum of 2 level table (For arm64 see
+	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
+	 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
+	 * As for PUD huge maps, we must make sure that we have at least
+	 * 3 levels, i.e, PMD is not folded.
	 */
	if ((vma_pagesize == PMD_SIZE ||
-	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
+	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
	    !force_pte) {
		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
	}
...
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)

 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
+	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
-	struct swait_queue_head *wq;
	unsigned long cpu_id;
-	unsigned long context_id;
-	phys_addr_t target_pc;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))

@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
		return PSCI_RET_INVALID_PARAMS;
	}

-	target_pc = smccc_get_arg2(source_vcpu);
-	context_id = smccc_get_arg3(source_vcpu);
+	reset_state = &vcpu->arch.reset_state;

-	kvm_reset_vcpu(vcpu);
-
-	/* Gracefully handle Thumb2 entry point */
-	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-		target_pc &= ~((phys_addr_t) 1);
-		vcpu_set_thumb(vcpu);
-	}
+	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
-	if (kvm_vcpu_is_be(source_vcpu))
-		kvm_vcpu_set_be(vcpu);
+	reset_state->be = kvm_vcpu_is_be(source_vcpu);

-	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
-	smccc_set_retval(vcpu, context_id, 0, 0, 0);
-	vcpu->arch.power_off = false;
-	smp_mb();		/* Make sure the above is visible */
+	reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+	WRITE_ONCE(reset_state->reset, true);
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

-	wq = kvm_arch_vcpu_wq(vcpu);
-	swake_up_one(wq);
+	/*
+	 * Make sure the reset request is observed if the change to
+	 * power_state is observed.
+	 */
+	smp_wmb();
+
+	vcpu->arch.power_off = false;
+	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
 }
...
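The smp_wmb() added here pairs with the smp_rmb() added to vcpu_req_sleep() earlier in the series: the producer publishes reset_state before flipping power_off, and a consumer that sees power_off cleared must also see the reset request. A minimal sketch of that publish/consume ordering, using C11 fences in place of the kernel barrier macros (illustrative names only):

/* Sketch of the ordering between kvm_psci_vcpu_on() (producer) and
 * vcpu_req_sleep() (consumer); a release fence stands in for smp_wmb()
 * and an acquire fence for smp_rmb(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static struct { unsigned long pc; bool reset; } reset_state;
static atomic_bool power_off = true;

static void producer_cpu_on(unsigned long pc)
{
	reset_state.pc = pc;
	reset_state.reset = true;                       /* publish the request */
	atomic_thread_fence(memory_order_release);      /* smp_wmb() */
	atomic_store_explicit(&power_off, false, memory_order_relaxed);
}

static void consumer_wakeup(void)
{
	if (!atomic_load_explicit(&power_off, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);  /* smp_rmb() */
		if (reset_state.reset)
			printf("reset to pc=%#lx\n", reset_state.pc);
	}
}

int main(void)
{
	producer_cpu_on(0x80080000);
	consumer_wakeup();
	return 0;
}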
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
		return 0;
	}

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	print_irq_state(s, irq, vcpu);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(kvm, irq);

	return 0;
...
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
	struct vgic_dist *dist = &kvm->arch.vgic;

	INIT_LIST_HEAD(&dist->lpi_list_head);
-	spin_lock_init(&dist->lpi_list_lock);
+	raw_spin_lock_init(&dist->lpi_list_lock);
 }

 /* CREATION */

@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);

@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
+	raw_spin_lock_init(&vgic_cpu->ap_list_lock);

	/*
	 * Enable and configure all SGIs to be edge-triggered and

@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;

@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
			irq->config = VGIC_CONFIG_LEVEL;
		}

-		/*
-		 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
-		 * so we always know the emulation type at this point as it's
-		 * either explicitly configured as GICv3, or explicitly
-		 * configured as GICv2, or not configured yet which also
-		 * implies GICv2.
-		 */
		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
			irq->group = 1;
		else

@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
 {
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
-	int ret = 0, i;
+	int ret = 0, i, idx;

	if (vgic_initialized(kvm))
		return 0;

@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
	if (ret)
		goto out;

+	/* Initialize groups on CPUs created before the VGIC type was known */
+	kvm_for_each_vcpu(idx, vcpu, kvm) {
+		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+
+			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+				irq->group = 1;
+			else
+				irq->group = 0;
+		}
+	}
+
	if (vgic_has_its(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
...
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
-	spin_lock_init(&irq->irq_lock);
+	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);

@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
	irq->target_vcpu = vcpu;
	irq->group = 1;

-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to

@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
	dist->lpi_list_count++;

 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.

@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
	if (ret)
		return ret;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);

@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
		}
	}

-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
	if (!intids)
		return -ENOMEM;

-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;

@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
			continue;
		intids[i++] = irq->intid;
	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;

@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
	int ret = 0;
	unsigned long flags;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);

@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);
...
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
 }

@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
 }

@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
...
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
	if (!irq)
		return;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
 }

@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type

@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);

@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * An access targetting Group0 SGIs can only generate

@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
...
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
 }

@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else

@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
 }

@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);

@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }

 /*

@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
 }

@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
...
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
			vgic_irq_set_phys_active(irq, false);
		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
...
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
			vgic_irq_set_phys_active(irq, false);
		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

@@ -347,9 +347,9 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)

	status = val & (1 << bit_nr);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
...
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
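The comment above spells out the ap_list_lock ordering rule. A hypothetical helper obeying it (not a function in this patch) could look like the following toy sketch, where pthread mutexes stand in for the kernel's raw spinlocks:

/* Toy illustration of the documented rule: when two per-vCPU ap_list locks
 * must be held at once, always take the lower vcpu_id first so concurrent
 * callers cannot deadlock on each other. Names are illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct toy_vcpu {
	int vcpu_id;
	pthread_mutex_t ap_list_lock;
};

static void lock_both_ap_lists(struct toy_vcpu *a, struct toy_vcpu *b)
{
	struct toy_vcpu *first  = (a->vcpu_id < b->vcpu_id) ? a : b;
	struct toy_vcpu *second = (first == a) ? b : a;

	pthread_mutex_lock(&first->ap_list_lock);
	pthread_mutex_lock(&second->ap_list_lock);
}

static void unlock_both_ap_lists(struct toy_vcpu *a, struct toy_vcpu *b)
{
	pthread_mutex_unlock(&a->ap_list_lock);
	pthread_mutex_unlock(&b->ap_list_lock);
}

int main(void)
{
	struct toy_vcpu x = { 0, PTHREAD_MUTEX_INITIALIZER };
	struct toy_vcpu y = { 1, PTHREAD_MUTEX_INITIALIZER };

	lock_both_ap_lists(&y, &x);     /* still locks vcpu 0 first */
	unlock_both_ap_lists(&y, &x);
	printf("ordered locking ok\n");
	return 0;
}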
...@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) ...@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
struct vgic_irq *irq = NULL; struct vgic_irq *irq = NULL;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&dist->lpi_list_lock, flags); raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid) if (irq->intid != intid)
...@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) ...@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
irq = NULL; irq = NULL;
out_unlock: out_unlock:
spin_unlock_irqrestore(&dist->lpi_list_lock, flags); raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq; return irq;
} }
...@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) ...@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI) if (irq->intid < VGIC_MIN_LPI)
return; return;
spin_lock_irqsave(&dist->lpi_list_lock, flags); raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release)) { if (!kref_put(&irq->refcount, vgic_irq_release)) {
spin_unlock_irqrestore(&dist->lpi_list_lock, flags); raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return; return;
}; };
list_del(&irq->lpi_list); list_del(&irq->lpi_list);
dist->lpi_list_count--; dist->lpi_list_count--;
spin_unlock_irqrestore(&dist->lpi_list_lock, flags); raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
kfree(irq); kfree(irq);
} }
...@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) ...@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
bool penda, pendb; bool penda, pendb;
int ret; int ret;
spin_lock(&irqa->irq_lock); raw_spin_lock(&irqa->irq_lock);
spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
if (irqa->active || irqb->active) { if (irqa->active || irqb->active) {
ret = (int)irqb->active - (int)irqa->active; ret = (int)irqb->active - (int)irqa->active;
...@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) ...@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
/* Both pending and enabled, sort by priority */ /* Both pending and enabled, sort by priority */
ret = irqa->priority - irqb->priority; ret = irqa->priority - irqb->priority;
out: out:
spin_unlock(&irqb->irq_lock); raw_spin_unlock(&irqb->irq_lock);
spin_unlock(&irqa->irq_lock); raw_spin_unlock(&irqa->irq_lock);
return ret; return ret;
} }
...@@ -325,7 +325,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, ...@@ -325,7 +325,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
* not need to be inserted into an ap_list and there is also * not need to be inserted into an ap_list and there is also
* no more work for us to do. * no more work for us to do.
*/ */
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/* /*
* We have to kick the VCPU here, because we could be * We have to kick the VCPU here, because we could be
...@@ -347,12 +347,12 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, ...@@ -347,12 +347,12 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
* We must unlock the irq lock to take the ap_list_lock where * We must unlock the irq lock to take the ap_list_lock where
* we are going to insert this new pending interrupt. * we are going to insert this new pending interrupt.
*/ */
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/* someone can do stuff here, which we re-check below */ /* someone can do stuff here, which we re-check below */
spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
/* /*
* Did something change behind our backs? * Did something change behind our backs?
...@@ -367,10 +367,11 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, ...@@ -367,10 +367,11 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
*/ */
if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
flags);
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
goto retry; goto retry;
} }
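The hunk above follows a drop-and-revalidate pattern: the inner irq_lock is released so the outer ap_list_lock can be taken in the documented order, the irq_lock is then retaken, and the earlier routing decision is rechecked; if another path changed irq->vcpu or the target in the window, everything is unlocked and the sequence retried. A generic sketch of that shape (names and signature are illustrative, not from the commit; irqsave handling omitted):

	/* Sketch of the drop-and-revalidate dance: caller holds *inner on entry. */
	static bool take_outer_then_inner_or_retry(raw_spinlock_t *inner,
						   raw_spinlock_t *outer,
						   bool (*still_valid)(void *ctx),
						   void *ctx)
	{
		raw_spin_unlock(inner);		/* honour outer-before-inner order */
		raw_spin_lock(outer);
		raw_spin_lock(inner);

		if (still_valid(ctx))
			return true;		/* proceed, holding both locks */

		raw_spin_unlock(inner);
		raw_spin_unlock(outer);
		return false;			/* caller re-takes *inner and retries */
	}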
...@@ -382,8 +383,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, ...@@ -382,8 +383,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu; irq->vcpu = vcpu;
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu); kvm_vcpu_kick(vcpu);
...@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, ...@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
if (!irq) if (!irq)
return -EINVAL; return -EINVAL;
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (!vgic_validate_injection(irq, level, owner)) { if (!vgic_validate_injection(irq, level, owner)) {
/* Nothing to see here, move along... */ /* Nothing to see here, move along... */
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq); vgic_put_irq(kvm, irq);
return 0; return 0;
} }
...@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, ...@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq); BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
return ret; return ret;
...@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) ...@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
if (!irq->hw) if (!irq->hw)
goto out; goto out;
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->active = false; irq->active = false;
irq->pending_latch = false; irq->pending_latch = false;
irq->line_level = false; irq->line_level = false;
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out: out:
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
} }
...@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) ...@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
BUG_ON(!irq); BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
kvm_vgic_unmap_irq(irq); kvm_vgic_unmap_irq(irq);
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
return 0; return 0;
...@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) ...@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
return -EINVAL; return -EINVAL;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid); irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->owner && irq->owner != owner) if (irq->owner && irq->owner != owner)
ret = -EEXIST; ret = -EEXIST;
else else
irq->owner = owner; irq->owner = owner;
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
return ret; return ret;
} }
...@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
retry: retry:
spin_lock(&vgic_cpu->ap_list_lock); raw_spin_lock(&vgic_cpu->ap_list_lock);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
bool target_vcpu_needs_kick = false; bool target_vcpu_needs_kick = false;
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
BUG_ON(vcpu != irq->vcpu); BUG_ON(vcpu != irq->vcpu);
...@@ -616,7 +617,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -616,7 +617,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
*/ */
list_del(&irq->ap_list); list_del(&irq->ap_list);
irq->vcpu = NULL; irq->vcpu = NULL;
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
/* /*
* This vgic_put_irq call matches the * This vgic_put_irq call matches the
...@@ -631,14 +632,14 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -631,14 +632,14 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
if (target_vcpu == vcpu) { if (target_vcpu == vcpu) {
/* We're on the right CPU */ /* We're on the right CPU */
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
continue; continue;
} }
/* This interrupt looks like it has to be migrated. */ /* This interrupt looks like it has to be migrated. */
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
spin_unlock(&vgic_cpu->ap_list_lock); raw_spin_unlock(&vgic_cpu->ap_list_lock);
/* /*
* Ensure locking order by always locking the smallest * Ensure locking order by always locking the smallest
...@@ -652,10 +653,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -652,10 +653,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
vcpuB = vcpu; vcpuB = vcpu;
} }
spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
SINGLE_DEPTH_NESTING); SINGLE_DEPTH_NESTING);
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
/* /*
* If the affinity has been preserved, move the * If the affinity has been preserved, move the
...@@ -675,9 +676,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -675,9 +676,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
target_vcpu_needs_kick = true; target_vcpu_needs_kick = true;
} }
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
if (target_vcpu_needs_kick) { if (target_vcpu_needs_kick) {
kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
...@@ -687,7 +688,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) ...@@ -687,7 +688,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
goto retry; goto retry;
} }
spin_unlock(&vgic_cpu->ap_list_lock); raw_spin_unlock(&vgic_cpu->ap_list_lock);
} }
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
...@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, ...@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
int w; int w;
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
/* GICv2 SGIs can count for more than one... */ /* GICv2 SGIs can count for more than one... */
w = vgic_irq_get_lr_count(irq); w = vgic_irq_get_lr_count(irq);
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
count += w; count += w;
*multi_sgi |= (w > 1); *multi_sgi |= (w > 1);
...@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) ...@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
count = 0; count = 0;
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
/* /*
* If we have multi-SGIs in the pipeline, we need to * If we have multi-SGIs in the pipeline, we need to
...@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) ...@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
* the AP list has been sorted already. * the AP list has been sorted already.
*/ */
if (multi_sgi && irq->priority > prio) { if (multi_sgi && irq->priority > prio) {
 spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
break; break;
} }
...@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) ...@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
prio = irq->priority; prio = irq->priority;
} }
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
if (count == kvm_vgic_global_state.nr_lr) { if (count == kvm_vgic_global_state.nr_lr) {
if (!list_is_last(&irq->ap_list, if (!list_is_last(&irq->ap_list,
...@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) ...@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
vgic_flush_lr_state(vcpu); vgic_flush_lr_state(vcpu);
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
if (can_access_vgic_from_kernel()) if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu); vgic_restore_state(vcpu);
...@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) ...@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
vgic_get_vmcr(vcpu, &vmcr); vgic_get_vmcr(vcpu, &vmcr);
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
spin_lock(&irq->irq_lock); raw_spin_lock(&irq->irq_lock);
pending = irq_is_pending(irq) && irq->enabled && pending = irq_is_pending(irq) && irq->enabled &&
!irq->active && !irq->active &&
irq->priority < vmcr.pmr; irq->priority < vmcr.pmr;
spin_unlock(&irq->irq_lock); raw_spin_unlock(&irq->irq_lock);
if (pending) if (pending)
break; break;
} }
spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
return pending; return pending;
} }
...@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) ...@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
return false; return false;
irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
spin_lock_irqsave(&irq->irq_lock, flags); raw_spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active; map_is_active = irq->hw && irq->active;
spin_unlock_irqrestore(&irq->irq_lock, flags); raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq);
return map_is_active; return map_is_active;
} }