Commit 609b7002 authored by Radim Krčmář

Merge tag 'kvm-arm-fixes-for-v4.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

KVM/ARM Fixes for v4.15.

Fixes:
 - A number of issues in the vgic discovered using SMATCH
 - An off-by-one bit calculation in our stage 2 base address mask (32-bit
   and 64-bit); a short worked example appears just before the diff below
 - Fixes to single-step debugging instructions that trap for other
   reasons such as MMIO aborts
 - Unavailable HYP mode being printed as an error rather than as info
 - Potential spinlock deadlock in the vgic
 - Avoid calling vgic vcpu free more than once
 - Broken bit calculation for big endian systems
parents ae64f9bd fc396e06
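
To illustrate the off-by-one fix, here is a minimal, self-contained sketch (not part of the patch). The constant values and the BADDR_MASK_OLD/BADDR_MASK_NEW names are illustrative stand-ins for the VTTBR_BADDR_MASK definitions changed in the diff: shifting the mask by (VTTBR_X - 1) covers bit VTTBR_X - 1, which lies below the base address alignment, and drops the topmost valid bit; shifting by VTTBR_X lines the mask up with the VTTBR base address field.

#include <stdint.h>
#include <stdio.h>

/* Example values only; the real ones depend on T0SZ and the page granule. */
#define PHYS_MASK_SHIFT	48
#define VTTBR_X		24

/* Old, off-by-one mask: shifted by (VTTBR_X - 1). */
#define BADDR_MASK_OLD	((((uint64_t)1 << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << (VTTBR_X - 1))
/* Fixed mask: shifted by VTTBR_X, matching the base address alignment. */
#define BADDR_MASK_NEW	((((uint64_t)1 << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)

int main(void)
{
	printf("old mask: 0x%016llx\n", (unsigned long long)BADDR_MASK_OLD);
	printf("new mask: 0x%016llx\n", (unsigned long long)BADDR_MASK_NEW);
	return 0;
}

With these example values the old mask is 0x00007fffff800000 and the fixed one is 0x0000ffffff000000, i.e. the old definition both includes a bit below the base address alignment and truncates the top of the addressable range.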
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT	(VTTBR_X - 1)
-#define VTTBR_BADDR_MASK	(((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK	(((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT	_AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
...
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+					     struct kvm_run *run)
+{
+	return false;
+}
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
...
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS		(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X			(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
-#define VTTBR_BADDR_SHIFT	(VTTBR_X - 1)
-#define VTTBR_BADDR_MASK	(((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK	(((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT	(UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
...
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
...
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
 		}
 	}
 }
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+		return true;
+	}
+	return false;
+}
@@ -28,6 +28,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -186,6 +187,40 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	return arm_exit_handlers[hsr_ec];
 }
+
+/*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int handled;
+
+	/*
+	 * See ARM ARM B1.14.1: "Hyp traps on instructions
+	 * that fail their condition code check"
+	 */
+	if (!kvm_condition_valid(vcpu)) {
+		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		handled = 1;
+	} else {
+		exit_handle_fn exit_handler;
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+		handled = exit_handler(vcpu, run);
+	}
+
+	/*
+	 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+	 * structure if we need to return to userspace.
+	 */
+	if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+		handled = 0;
+
+	return handled;
+}
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
@@ -193,8 +228,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index)
 {
-	exit_handle_fn exit_handler;
-
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
 		kvm_inject_vabt(vcpu);
-		return 1;
-	case ARM_EXCEPTION_TRAP:
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-			return 1;
-		}
-
-		exit_handler = kvm_get_exit_handler(vcpu);
-		return exit_handler(vcpu, run);
+		/* We may still need to return for single-step */
+		if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+			&& kvm_arm_handle_step_debug(vcpu, run))
+			return 0;
+		else
+			return 1;
+	case ARM_EXCEPTION_TRAP:
+		return handle_trap_exceptions(vcpu, run);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
...
@@ -22,6 +22,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 	}
 	write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		vcpu->arch.fault.esr_el2 =
+			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+		return false;
+	} else {
+		return true;
+	}
 }
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 		if (ret == -1) {
-			/* Promote an illegal access to an SError */
-			__skip_instr(vcpu);
+			/* Promote an illegal access to an
+			 * SError. If we would be returning
+			 * due to single-step clear the SS
+			 * bit so handle_exit knows what to
+			 * do after dealing with the error.
+			 */
+			if (!__skip_instr(vcpu))
+				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 			exit_code = ARM_EXCEPTION_EL1_SERROR;
 		}
@@ -363,8 +384,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 		/* 0 falls through to be handled out of EL2 */
...
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
 #endif
@@ -479,9 +479,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	vtimer_restore_state(vcpu);
-	if (has_vhe())
-		disable_el1_phys_timer_access();
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
@@ -510,9 +507,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
-	if (has_vhe())
-		enable_el1_phys_timer_access();
 	vtimer_save_state(vcpu);
 	/*
@@ -841,7 +835,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 	return 0;
...
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
 	}
 	if (run->immediate_exit)
@@ -1502,7 +1505,7 @@ int kvm_arch_init(void *opaque)
 	bool in_hyp_mode;
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
...
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
 	write_sysreg(cntvoff, cntvoff_el2);
 }
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
...
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
...
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
...
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
...
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 		goto out;
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 out:
 	mutex_unlock(&its->its_lock);
...
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 	if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 	return ret;
 }
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
...