Commit 774206bc authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.11, take #1

- VM init cleanups
- PSCI relay cleanups
- Kill CONFIG_KVM_ARM_PMU
- Fixup __init annotations
- Fixup reg_to_encoding()
- Fix spurious PMCR_EL0 access
parents 647daca2 45ba7b19
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
 Errors:

-  =====  =============================
-  EINTR  an unmasked signal is pending
-  =====  =============================
+  =======  ==============================================================
+  EINTR    an unmasked signal is pending
+  ENOEXEC  the vcpu hasn't been initialized or the guest tried to execute
+           instructions from device memory (arm64)
+  ENOSYS   data abort outside memslots with no syndrome info and
+           KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+  EPERM    SVE feature set but not finalized (arm64)
+  =======  ==============================================================

 This ioctl is used to run a guest virtual cpu. While there are no
 explicit parameters, there is an implicit parameter block that can be
......
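For reference, and not part of the patch: a userspace VMM's run loop could map these errno values roughly as in the sketch below. The helper name and structure are hypothetical; only the KVM_RUN ioctl and the errno values come from the documentation change above.

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vcpu_fd comes from a prior KVM_CREATE_VCPU ioctl. */
static int run_vcpu_once(int vcpu_fd)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
		return 0;

	switch (errno) {
	case EINTR:	/* an unmasked signal is pending: let the caller retry */
		return -EINTR;
	case ENOEXEC:	/* vcpu not initialized, or execution from device memory (arm64) */
	case ENOSYS:	/* no-syndrome data abort outside memslots, NISV_TO_USER off (arm64) */
	case EPERM:	/* SVE feature set but not finalized (arm64) */
	default:
		perror("KVM_RUN");
		return -errno;
	}
}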
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
 	struct kvm_pmu_events pmu_events;
 };

+struct kvm_host_psci_config {
+	/* PSCI version used by host. */
+	u32 version;
+
+	/* Function IDs used by host if version is v0.1. */
+	struct psci_0_1_function_ids function_ids_0_1;
+
+	bool psci_0_1_cpu_suspend_implemented;
+	bool psci_0_1_cpu_on_implemented;
+	bool psci_0_1_cpu_off_implemented;
+	bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
 	unsigned long pc;
 	unsigned long r0;
......
@@ -2558,7 +2558,7 @@ static void verify_hyp_capabilities(void)
 	int parange, ipa_max;
 	unsigned int safe_vmid_bits, vmid_bits;

-	if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+	if (!IS_ENABLED(CONFIG_KVM))
 		return;

 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
......
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");

-	if (IS_ENABLED(CONFIG_KVM))
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
 		kvm_compute_layout();
 }
......
@@ -49,14 +49,6 @@ if KVM

 source "virt/kvm/Kconfig"

-config KVM_ARM_PMU
-	bool "Virtual Performance Monitoring Unit (PMU) support"
-	depends on HW_PERF_EVENTS
-	default y
-	help
-	  Adds support for a virtual Performance Monitoring Unit (PMU) in
-	  virtual machines.
-
 endif # KVM

 endif # VIRTUALIZATION
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o

-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm))
 		goto no_vgic;

-	if (!vgic_initialized(vcpu->kvm))
-		return -ENODEV;
-
+	/*
+	 * At this stage, we have the guarantee that the vgic is both
+	 * available and initialized.
+	 */
 	if (!timer_irqs_are_valid(vcpu)) {
 		kvm_debug("incorrectly configured timer irqs\n");
 		return -EINVAL;
......
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		 * Map the VGIC hardware resources before running a vcpu the
 		 * first time on this VM.
 		 */
-		if (unlikely(!vgic_ready(kvm))) {
-			ret = kvm_vgic_map_resources(kvm);
-			if (ret)
-				return ret;
-		}
+		ret = kvm_vgic_map_resources(kvm);
+		if (ret)
+			return ret;
 	} else {
 		/*
 		 * Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
 	.notifier_call = hyp_init_cpu_pm_notifier,
 };

-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }

-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
 	 * allow any other CPUs from the `possible` set to boot.
 	 */
 	for_each_online_cpu(cpu)
-		kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }

+#define init_psci_0_1_impl_state(config, what)	\
+	config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
 	/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
 		return false;
 	}

-	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-	kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+	kvm_host_psci_config.version = psci_ops.get_version();
+
+	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+	}

 	return true;
 }
......
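A side note on the init_psci_0_1_impl_state() macro added above: it relies on '##' token pasting to build the struct field name from the PSCI function name. A stand-alone sketch of the same pattern, using hypothetical types rather than the kernel's psci_ops:

#include <stdbool.h>
#include <stdio.h>

struct cfg { bool psci_0_1_cpu_on_implemented; };
struct ops { bool (*cpu_on)(void); };

/* Same shape as the kernel macro: pastes 'what' into the field name. */
#define init_impl_state(config, ops, what) \
	((config).psci_0_1_ ## what ## _implemented = !!(ops).what)

static bool dummy_cpu_on(void) { return true; }

int main(void)
{
	struct cfg c = { 0 };
	struct ops o = { .cpu_on = dummy_cpu_on };

	/* Expands to: c.psci_0_1_cpu_on_implemented = !!o.cpu_on; */
	init_impl_state(c, o, cpu_on);
	printf("cpu_on implemented: %d\n", c.psci_0_1_cpu_on_implemented);
	return 0;
}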
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
 	}
 }

+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
 	__kvm_hyp_host_forward_smc(host_ctxt);
 }

-static void skip_host_instruction(void)
-{
-	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
 	bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 	if (!handled)
 		default_host_smc_handler(host_ctxt);

-	/*
-	 * Unlike HVC, the return address of an SMC is the instruction's PC.
-	 * Move the return address past the instruction.
-	 */
-	skip_host_instruction();
+	/* SMC was trapped, move ELR past the current PC. */
+	kvm_skip_host_instr();
 }

 void handle_trap(struct kvm_cpu_context *host_ctxt)
......
@@ -14,14 +14,14 @@
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

 u64 cpu_logical_map(unsigned int cpu)
 {
-	if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+	if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
 		hyp_panic();

-	return __cpu_logical_map[cpu];
+	return hyp_cpu_logical_map[cpu];
 }

 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
......
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>

 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;

 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-	DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-	return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

 static bool is_psci_0_1_call(u64 func_id)
 {
-	return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.migrate);
+	return (is_psci_0_1(cpu_suspend, func_id) ||
+		is_psci_0_1(cpu_on, func_id) ||
+		is_psci_0_1(cpu_off, func_id) ||
+		is_psci_0_1(migrate, func_id));
 }

 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
 	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 }

-static bool is_psci_call(u64 func_id)
-{
-	switch (kvm_host_psci_version) {
-	case PSCI_VERSION(0, 1):
-		return is_psci_0_1_call(func_id);
-	default:
-		return is_psci_0_2_call(func_id);
-	}
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
 			       unsigned long arg1, unsigned long arg2)
 {
@@ -248,14 +231,13 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)

 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-	if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	    (func_id == kvm_host_psci_0_1_function_ids.migrate))
+	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
 		return psci_forward(host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+	if (is_psci_0_1(cpu_on, func_id))
 		return psci_cpu_on(func_id, host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+	if (is_psci_0_1(cpu_suspend, func_id))
 		return psci_cpu_suspend(func_id, host_ctxt);
-	else
-		return PSCI_RET_NOT_SUPPORTED;
+
+	return PSCI_RET_NOT_SUPPORTED;
 }
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)

 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
 {
-	u64 func_id = get_psci_func_id(host_ctxt);
+	DECLARE_REG(u64, func_id, host_ctxt, 0);
 	unsigned long ret;

-	if (!is_psci_call(func_id))
-		return false;
-
-	switch (kvm_host_psci_version) {
+	switch (kvm_host_psci_config.version) {
 	case PSCI_VERSION(0, 1):
+		if (!is_psci_0_1_call(func_id))
+			return false;
 		ret = psci_0_1_handler(func_id, host_ctxt);
 		break;
 	case PSCI_VERSION(0, 2):
+		if (!is_psci_0_2_call(func_id))
+			return false;
 		ret = psci_0_2_handler(func_id, host_ctxt);
 		break;
 	default:
+		if (!is_psci_0_2_call(func_id))
+			return false;
 		ret = psci_1_0_handler(func_id, host_ctxt);
 		break;
 	}
......
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 			return -EINVAL;
 	}

-	kvm_pmu_vcpu_reset(vcpu);
-
 	return 0;
 }
......
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;

+	/* No PMU available, PMCR_EL0 may UNDEF... */
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
 	pmcr = read_sysreg(pmcr_el0);
 	/*
 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

 #define reg_to_encoding(x)						\
 	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
-		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)

 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
......
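The reg_to_encoding() hunk above drops a stray trailing semicolon from the macro body (the "Fixup reg_to_encoding()" item in the commit message). A stand-alone illustration of why that matters, using made-up macros rather than the kernel definitions:

#define ENCODE_BAD(x)	((x) * 4);	/* trailing ';' turns every use into a statement */
#define ENCODE_OK(x)	((x) * 4)

int classify(int reg)
{
	int id = ENCODE_OK(reg);	/* fine either way: the extra ';' would only add an empty statement */

	/* With ENCODE_BAD this line would not compile:
	 *   if (ENCODE_BAD(reg) == 8)  ->  if (((reg) * 4); == 8)
	 */
	if (ENCODE_OK(reg) == 8)
		return 1;

	return id;
}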
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }

 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into a EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-	extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
 	u64 kern_va, hyp_va;

 	/* Compute the offset from the hyp VA and PA of a random symbol. */
-	kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+	kern_va = (u64)lm_alias(__hyp_text_start);
 	hyp_va = __early_kern_hyp_va(kern_va);
-	CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }

 /*
......
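To make the offset arithmetic above concrete (numbers invented purely for illustration): if the symbol's physical address is 0x48000000 and its hyp VA is 0x100048000000, then hyp_physvirt_offset = 0x48000000 - 0x100048000000 = -0x100000000000, and __hyp_pa(va) = va + hyp_physvirt_offset maps the hyp VA back to its physical address.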
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
  * Map the MMIO regions depending on the VGIC model exposed to the guest
  * called on the first VCPU run.
  * Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
  * vgic_ready() returns true if this function has succeeded.
  * @kvm: kvm struct pointer
  */
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;

+	if (likely(vgic_ready(kvm)))
+		return 0;
+
 	mutex_lock(&kvm->lock);
+
+	if (vgic_ready(kvm))
+		goto out;
+
 	if (!irqchip_in_kernel(kvm))
 		goto out;
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (ret)
 		__kvm_vgic_destroy(kvm);
+	else
+		dist->ready = true;

 out:
 	mutex_unlock(&kvm->lock);
......
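The kvm_vgic_map_resources() change above adds a lock-free fast path plus a re-check under kvm->lock before doing the one-time setup. A stand-alone sketch of that check/lock/re-check idiom, with hypothetical names and a pthread mutex standing in for the kernel's kvm->lock:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dev_ready;

static int dev_do_setup(void) { return 0; }	/* stand-in for the real mapping work */

int dev_map_resources(void)
{
	int ret = 0;

	if (dev_ready)			/* fast path: already set up, no lock taken */
		return 0;

	pthread_mutex_lock(&dev_lock);
	if (dev_ready)			/* re-check: another caller may have won the race */
		goto out;

	ret = dev_do_setup();
	if (!ret)
		dev_ready = true;	/* only marked ready on success, as in the patch */
out:
	pthread_mutex_unlock(&dev_lock);
	return ret;
}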
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;

-	if (vgic_ready(kvm))
-		goto out;
-
 	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
 	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
 		kvm_err("Need to set vgic cpu and dist addresses first\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}

 	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
 		kvm_err("VGIC CPU and dist frames overlap\n");
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}

 	/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to initialize VGIC dynamic data structures\n");
-		goto out;
+		return ret;
 	}

 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
 	if (ret) {
 		kvm_err("Unable to register VGIC MMIO regions\n");
-		goto out;
+		return ret;
 	}

 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
 					    KVM_VGIC_V2_CPU_SIZE, true);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
-			goto out;
+			return ret;
 		}
 	}

-	dist->ready = true;
-
-out:
-	return ret;
+	return 0;
 }

 DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
......
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	int ret = 0;
 	int c;

-	if (vgic_ready(kvm))
-		goto out;
-
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

 		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
 			kvm_debug("vcpu %d redistributor base not set\n", c);
-			ret = -ENXIO;
-			goto out;
+			return -ENXIO;
 		}
 	}

 	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
 		kvm_err("Need to set vgic distributor addresses first\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}

 	if (!vgic_v3_check_base(kvm)) {
 		kvm_err("VGIC redist and dist frames overlap\n");
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}

 	/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	 * the VGIC before we need to use it.
 	 */
 	if (!vgic_initialized(kvm)) {
-		ret = -EBUSY;
-		goto out;
+		return -EBUSY;
 	}

 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
 	if (ret) {
 		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-		goto out;
+		return ret;
 	}

 	if (kvm_vgic_global_state.has_gicv4_1)
 		vgic_v4_configure_vsgis(kvm);
-	dist->ready = true;

-out:
-	return ret;
+	return 0;
 }

 DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
......
@@ -13,7 +13,7 @@
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 #define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

-#ifdef CONFIG_KVM_ARM_PMU
+#ifdef CONFIG_HW_PERF_EVENTS

 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
......