Commit 2fe296a6 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes/cleanups from Catalin Marinas:

 - Avoid taking a mutex in the secondary CPU bring-up path when
   interrupts are disabled

 - Ignore perf exclude_hv when the kernel is running in Hyp mode

 - Remove redundant instruction in cmpxchg

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/cpufeature: don't use mutex in bringup path
  arm64: perf: Ignore exclude_hv when kernel is running in HYP
  arm64: Remove redundant mov from LL/SC cmpxchg
parents e5a489ab 63a1e1c9
arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,	\
 	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
-	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
 	"2:"								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
 	  [v] "+Q" (*(unsigned long *)ptr)				\
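For context on why the mov is redundant: the branch to label 2 is only *not* taken when the loaded value already equals [old], so the deleted instruction could never change [oldval] on the success path. A rough C equivalent of the LL/SC sequence (illustrative only; load_exclusive() and try_store_exclusive() are hypothetical stand-ins for the ld*xr/st*xr instructions):

static unsigned long cmpxchg_sketch(unsigned long *ptr,
				    unsigned long old, unsigned long new)
{
	unsigned long oldval;

	do {
		oldval = load_exclusive(ptr);		/* 1: ld*xr */
		if (oldval != old)			/* eor; cbnz 2f */
			return oldval;			/* mismatch: skip store */
	} while (!try_store_exclusive(ptr, new));	/* st*xr; cbnz 1b */

	return oldval;	/* 2: success path, oldval already equals old */
}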
arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {

 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;

 bool this_cpu_has_cap(unsigned int cap);
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }

 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
 	return test_bit(num, cpu_hwcaps);
 }

+static inline bool cpus_have_const_cap(int num)
+{
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
 			num, ARM64_NCAPS);
 	} else {
 		__set_bit(num, cpu_hwcaps);
-		static_branch_enable(&cpu_hwcap_keys[num]);
 	}
 }
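A hypothetical caller, to show what the new wrapper buys (ARM64_HAS_VIRT_HOST_EXTN is a real capability id; the surrounding function is made up for illustration):

static bool example_vhe_check(void)
{
	/*
	 * Early in boot, before arm64_const_caps_ready is set, this
	 * falls back to the cpu_hwcaps bitmap, so it is safe to call
	 * while secondaries are still coming up; once the caps are
	 * finalized it becomes a patched static branch with no load.
	 */
	return cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN);
}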
arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@

 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long vector_ptr)
 {
 	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code.
+	 * Call initialization code, and switch to the full blown HYP code.
+	 * If the cpucaps haven't been finalized yet, something has gone very
+	 * wrong, and hyp will crash and burn when it uses any
+	 * cpus_have_const_cap() wrapper.
 	 */
+	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-	for (; caps->matches; caps++)
-		if (caps->enable && cpus_have_cap(caps->capability))
+	for (; caps->matches; caps++) {
+		unsigned int num = caps->capability;
+
+		if (!cpus_have_cap(num))
+			continue;
+
+		/* Ensure cpus_have_const_cap(num) works */
+		static_branch_enable(&cpu_hwcap_keys[num]);
+
+		if (caps->enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * we return.
 			 */
 			stop_machine(caps->enable, NULL, cpu_online_mask);
+		}
+	}
 }

 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
 	enable_cpu_capabilities(arm64_features);
 }

+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+	static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
 	enable_errata_workarounds();
+	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);

 	if (system_supports_32bit_el0())
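Taken together, the cpufeature.c changes split capability publication into two phases: cpus_set_cap() now only sets the cpu_hwcaps bit (safe on the secondary bring-up path, where interrupts are disabled and static_branch_enable() could sleep on a mutex), while the static keys are patched later from sleepable boot context, before arm64_const_caps_ready is flipped. A minimal userspace model of this two-phase check, with atomic flags standing in for patched static branches (names mirror the kernel's, but nothing here is kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

#define NCAPS 64UL
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static atomic_bool const_caps_ready;			/* arm64_const_caps_ready */
static unsigned long cpu_hwcaps[NCAPS / BITS_PER_LONG];	/* early bitmap */
static atomic_bool cpu_hwcap_keys[NCAPS];		/* stand-in static keys */

/* Bring-up path: only touches the bitmap, never a "static key". */
static void cpus_set_cap(unsigned int num)
{
	cpu_hwcaps[num / BITS_PER_LONG] |= 1UL << (num % BITS_PER_LONG);
}

static bool cpus_have_cap(unsigned int num)
{
	return cpu_hwcaps[num / BITS_PER_LONG] & (1UL << (num % BITS_PER_LONG));
}

/* Boot-time, sleepable context: patch the keys, then declare them ready. */
static void finalize_caps(void)
{
	for (unsigned int num = 0; num < NCAPS; num++)
		if (cpus_have_cap(num))
			atomic_store(&cpu_hwcap_keys[num], true);
	atomic_store(&const_caps_ready, true);
}

static bool cpus_have_const_cap(unsigned int num)
{
	if (atomic_load(&const_caps_ready))	/* fast path after boot */
		return atomic_load(&cpu_hwcap_keys[num]);
	return cpus_have_cap(num);		/* bring-up-safe fallback */
}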
arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 	if (attr->exclude_idle)
 		return -EPERM;
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
-	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_PMU_EXCLUDE_EL1;
-	if (!attr->exclude_hv)
-		config_base |= ARMV8_PMU_INCLUDE_EL2;

 	/*
 	 * Install the filter into config_base as this is used to
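The user-visible effect: on a VHE host (kernel running at EL2), an attribute combination like the following was previously rejected with -EINVAL and is now accepted, with exclude_hv silently ignored. A hedged sketch using the standard perf ABI fields (the helper function itself is made up for illustration):

#include <linux/perf_event.h>
#include <string.h>

static void example_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size           = sizeof(*attr);
	attr->type           = PERF_TYPE_HARDWARE;
	attr->config         = PERF_COUNT_HW_CPU_CYCLES;
	attr->exclude_kernel = 1;  /* filters EL2 on a VHE host, EL1 otherwise */
	attr->exclude_hv     = 0;  /* ignored when the kernel runs in HYP */
	/*
	 * Before this fix, perf_event_open() with this attr failed with
	 * -EINVAL on a VHE host because exclude_kernel != exclude_hv.
	 */
}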