Commit 21fb26bf authored by Mark Rutland, committed by Catalin Marinas

arm64: alternatives: add alternative_has_feature_*()

Currently we use a mixture of alternative sequences and static branches
to handle features detected at boot time. For ease of maintenance we
generally prefer to use static branches in C code, but this has a few
downsides:

* Each static branch has metadata in the __jump_table section, which is
  not discarded after features are finalized. This wastes some space,
  and slows down the patching of other static branches.

* The static branches are patched at a different point in time from the
  alternatives, so changes are not atomic. This leaves a transient
  period where there could be a mismatch between the behaviour of
  alternatives and static branches, which could be problematic for some
  features (e.g. pseudo-NMI).

* More (instrumentable) kernel code is executed to patch each static
  branch, which can be risky when patching certain features (e.g.
  irqflags management for pseudo-NMI).

* When CONFIG_JUMP_LABEL=n, static branches are turned into a load of a
  flag and a conditional branch (as sketched below). This means it isn't
  safe to use such static branches in an alternative address space
  (e.g. the NVHE/PKVM hyp code), where the generated address isn't safe
  to access.
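
To illustrate that last point, a rough sketch of what the
CONFIG_JUMP_LABEL=n fallback boils down to (simplified from
include/linux/jump_label.h; the helper name is invented for the sketch):

	/* With CONFIG_JUMP_LABEL=n nothing is patched: the 'branch'
	 * degenerates into a load of the key's enable count, followed
	 * by a conditional branch at the call site.
	 */
	static __always_inline bool
	static_branch_likely_fallback(struct static_key_false *key)
	{
		return likely(atomic_read(&key->key.enabled) > 0);
	}

The load of key->key.enabled uses the key's link-time address, which is
why such a branch cannot be used from an alternative address space like
the NVHE/PKVM hyp code.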

To deal with these issues, this patch introduces new
alternative_has_feature_*() helpers, which work like static branches but
are patched using alternatives. This ensures the patching is performed
at the same time as other alternative patching, allows the metadata to
be freed after patching, and is safe for use in alternative address
spaces.
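
For example, this patch converts system_uses_lse_atomics() in
<asm/lse.h> (see the diff below) from:

	return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);

... to:

	return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);

so the check is patched together with all other alternatives, and no
static key needs to be visible to the hyp code.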

Note that all supported toolchains have asm goto support, and since
commit:

  a0a12c3e ("asm goto: eradicate CC_HAS_ASM_GOTO)"

... the CC_HAS_ASM_GOTO Kconfig symbol has been removed, so no feature
check is necessary, and we can always make use of asm goto.
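
For background, asm goto allows an inline assembly block to transfer
control to C labels, which is what lets these helpers be written in C.
A minimal stand-alone illustration (not code from this patch; the empty
asm body merely tells the compiler that l_no may be reached):

	static inline bool asm_goto_demo(void)
	{
		asm goto("" : : : : l_no);
		return true;	/* taken when the asm falls through */
	l_no:
		return false;	/* taken if the asm branches to l_no */
	}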

Additionally, note that:

* This has no impact on cpus_have_cap(), which is a dynamic check.

* This has no functional impact on cpus_have_const_cap(). The branches
  are patched slightly later than before this patch, but these branches
  are not reachable until caps have been finalised.

* It is now invalid to use cpus_have_final_cap() in the window between
  feature detection and patching. All existing uses are only expected
  after patching anyway, so this should not be a problem.

* The LSE atomics will now be enabled during alternatives patching
  rather than immediately before. As the LL/SC and LSE atomics are
  functionally equivalent this should not be problematic.

When building defconfig with GCC 12.1.0, the resulting Image is 64KiB
smaller:

| % ls -al Image-*
| -rw-r--r-- 1 mark mark 37108224 Aug 23 09:56 Image-after
| -rw-r--r-- 1 mark mark 37173760 Aug 23 09:54 Image-before

According to bloat-o-meter.pl:

| add/remove: 44/34 grow/shrink: 602/1294 up/down: 39692/-61108 (-21416)
| Function                                     old     new   delta
| [...]
| Total: Before=16618336, After=16596920, chg -0.13%
| add/remove: 0/2 grow/shrink: 0/0 up/down: 0/-1296 (-1296)
| Data                                         old     new   delta
| arm64_const_caps_ready                        16       -     -16
| cpu_hwcap_keys                              1280       -   -1280
| Total: Before=8987120, After=8985824, chg -0.01%
| add/remove: 0/0 grow/shrink: 0/0 up/down: 0/0 (0)
| RO Data                                      old     new   delta
| Total: Before=18408, After=18408, chg +0.00%
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220912162210.3626215-8-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 4c0bd995
arch/arm64/include/asm/alternative-macros.h

@@ -213,4 +213,45 @@ alternative_endif
 #define ALTERNATIVE(oldinstr, newinstr, ...)	\
 	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+static __always_inline bool
+alternative_has_feature_likely(unsigned long feature)
+{
+	BUILD_BUG_ON(feature >= ARM64_NCAPS);
+
+	asm_volatile_goto(
+	ALTERNATIVE("b	%l[l_no]", "nop", %[feature])
+	:
+	: [feature] "i" (feature)
+	:
+	: l_no);
+
+	return true;
+l_no:
+	return false;
+}
+
+static __always_inline bool
+alternative_has_feature_unlikely(unsigned long feature)
+{
+	BUILD_BUG_ON(feature >= ARM64_NCAPS);
+
+	asm_volatile_goto(
+	ALTERNATIVE("nop", "b	%l[l_yes]", %[feature])
+	:
+	: [feature] "i" (feature)
+	:
+	: l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_ALTERNATIVE_MACROS_H */
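
To spell out the behaviour of the helpers above: before alternatives
are applied, alternative_has_feature_likely() executes its default
instruction, the branch to l_no, and returns false; once the cap is
detected and patched, that branch is replaced with a NOP and the
function falls through to return true. Illustrative instruction
sequences (not a real disassembly):

| alternative_has_feature_likely(FEAT), before patching:
|	b	l_no		// cap not (yet) set -> return false
| ... and after patching, if FEAT was detected:
|	nop			// fall through -> return true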
arch/arm64/include/asm/cpufeature.h

@@ -6,6 +6,7 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
+#include <asm/alternative-macros.h>
 #include <asm/cpucaps.h>
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
@@ -419,8 +420,6 @@ static __always_inline bool is_hyp_code(void)
 }
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
 
 extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
@@ -438,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 
 static __always_inline bool system_capabilities_finalized(void)
 {
-	return static_branch_likely(&arm64_const_caps_ready);
+	return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
 }
 
 /*
@@ -465,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+	return alternative_has_feature_unlikely(num);
 }
 
 /*
arch/arm64/include/asm/lse.h

@@ -13,14 +13,13 @@
 #include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
+#include <asm/alternative-macros.h>
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-
 static __always_inline bool system_uses_lse_atomics(void)
 {
-	return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+	return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
 }
 
 #define __lse_ll_sc_body(op, ...)					\
arch/arm64/kernel/cpufeature.c

@@ -133,31 +133,12 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
  */
 static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
 
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier. This is also used to decide if we could use
- * the fast path for checking constant CPU caps.
- */
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static inline void finalize_system_capabilities(void)
-{
-	static_branch_enable(&arm64_const_caps_ready);
-}
-
 void dump_cpu_features(void)
 {
 	/* file-wide pr_fmt adds "CPU features: " prefix */
 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
 }
 
-DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
-EXPORT_SYMBOL(cpu_hwcap_keys);
-
 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 {									\
 	.sign = SIGNED,							\
@@ -2944,9 +2925,6 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
 		if (!cpus_have_cap(num))
 			continue;
 
-		/* Ensure cpus_have_const_cap(num) works */
-		static_branch_enable(&cpu_hwcap_keys[num]);
-
 		if (boot_scope && caps->cpu_enable)
 			/*
 			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
@@ -3268,9 +3246,6 @@ void __init setup_cpu_features(void)
 	sme_setup();
 	minsigstksz_setup();
 
-	/* Advertise that we have computed the system capabilities */
-	finalize_system_capabilities();
-
 	/*
 	 * Check for sane CTR_EL0.CWG value.
 	 */
arch/arm64/kernel/image-vars.h

@@ -89,10 +89,6 @@ KVM_NVHE_ALIAS(__icache_flags);
 /* VMID bits set by the KVM VMID allocator */
 KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
 
-/* Kernel symbols needed for cpus_have_final/const_caps checks. */
-KVM_NVHE_ALIAS(arm64_const_caps_ready);
-KVM_NVHE_ALIAS(cpu_hwcap_keys);
-
 /* Static keys which are set if a vGIC trap should be handled in hyp. */
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
 KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);