Commit 5235c7e2 authored by Mark Rutland, committed by Catalin Marinas

arm64: alternatives: use cpucap naming

To more clearly align the various users of the cpucap enumeration, this patch
changes the alternative code to use the term `cpucap` in favour of `feature`.
The alternative_has_feature_{likely,unlikely}() functions are renamed to
alternative_has_cap_{likely,unlikely}() to more clearly align with the
cpus_have_{const_,}cap() helpers.
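
As an illustrative aside (an editorial sketch, not part of this patch), a
caller of the renamed helper follows the same shape as the existing in-tree
wrappers; the function name below is hypothetical:

	/*
	 * Mirrors the in-tree system_uses_lse_atomics(): the cpucap must be
	 * a compile-time constant, and the initial "branch to false-path" is
	 * patched to a NOP once ARM64_HAS_LSE_ATOMICS is finalized as set.
	 */
	static __always_inline bool example_uses_lse_atomics(void)
	{
		return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS);
	}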

At the same time remove the stale comment referring to the "ARM64_CB
bit", which is evidently a typo for ARM64_CB_PATCH, which was removed in
commit:

  4c0bd995 ("arm64: alternatives: have callbacks take a cap")

There should be no functional change as a result of this patch; this is
purely a renaming exercise.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230607164846.3967305-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 7f242982
arch/arm64/include/asm/alternative-macros.h
@@ -23,17 +23,17 @@
 #include <linux/stringify.h>
 
-#define ALTINSTR_ENTRY(feature) \
+#define ALTINSTR_ENTRY(cpucap) \
 	" .word 661b - .\n" /* label */ \
 	" .word 663f - .\n" /* new instruction */ \
-	" .hword " __stringify(feature) "\n" /* feature bit */ \
+	" .hword " __stringify(cpucap) "\n" /* cpucap */ \
 	" .byte 662b-661b\n" /* source len */ \
 	" .byte 664f-663f\n" /* replacement len */
 
-#define ALTINSTR_ENTRY_CB(feature, cb) \
+#define ALTINSTR_ENTRY_CB(cpucap, cb) \
 	" .word 661b - .\n" /* label */ \
 	" .word " __stringify(cb) "- .\n" /* callback */ \
-	" .hword " __stringify(feature) "\n" /* feature bit */ \
+	" .hword " __stringify(cpucap) "\n" /* cpucap */ \
 	" .byte 662b-661b\n" /* source len */ \
 	" .byte 664f-663f\n" /* replacement len */
@@ -53,13 +53,13 @@
  *
  * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg_enabled) \
 	".if "__stringify(cfg_enabled)" == 1\n" \
 	"661:\n\t" \
 	oldinstr "\n" \
 	"662:\n" \
 	".pushsection .altinstructions,\"a\"\n" \
-	ALTINSTR_ENTRY(feature) \
+	ALTINSTR_ENTRY(cpucap) \
 	".popsection\n" \
 	".subsection 1\n" \
 	"663:\n\t" \
@@ -70,31 +70,31 @@
 	".previous\n" \
 	".endif\n"
 
-#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
+#define __ALTERNATIVE_CFG_CB(oldinstr, cpucap, cfg_enabled, cb) \
 	".if "__stringify(cfg_enabled)" == 1\n" \
 	"661:\n\t" \
 	oldinstr "\n" \
 	"662:\n" \
 	".pushsection .altinstructions,\"a\"\n" \
-	ALTINSTR_ENTRY_CB(feature, cb) \
+	ALTINSTR_ENTRY_CB(cpucap, cb) \
 	".popsection\n" \
 	"663:\n\t" \
 	"664:\n\t" \
 	".endif\n"
 
-#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, cfg, ...) \
+	__ALTERNATIVE_CFG(oldinstr, newinstr, cpucap, IS_ENABLED(cfg))
 
-#define ALTERNATIVE_CB(oldinstr, feature, cb) \
-	__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
+#define ALTERNATIVE_CB(oldinstr, cpucap, cb) \
+	__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (cpucap), 1, cb)
 
 #else
 
 #include <asm/assembler.h>
 
-.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+.macro altinstruction_entry orig_offset alt_offset cpucap orig_len alt_len
 	.word \orig_offset - .
 	.word \alt_offset - .
-	.hword (\feature)
+	.hword (\cpucap)
 	.byte \orig_len
 	.byte \alt_len
 .endm
@@ -210,9 +210,9 @@ alternative_endif
 #endif  /* __ASSEMBLY__ */
 
 /*
- * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap));
  *
- * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap, CONFIG_FOO));
  * N.B. If CONFIG_FOO is specified, but not selected, the whole block
  * will be omitted, including oldinstr.
  */
@@ -224,15 +224,15 @@ alternative_endif
 #include <linux/types.h>
 
 static __always_inline bool
-alternative_has_feature_likely(const unsigned long feature)
+alternative_has_cap_likely(const unsigned long cpucap)
 {
-	compiletime_assert(feature < ARM64_NCAPS,
-			   "feature must be < ARM64_NCAPS");
+	compiletime_assert(cpucap < ARM64_NCAPS,
+			   "cpucap must be < ARM64_NCAPS");
 
 	asm_volatile_goto(
-	ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops)
+	ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
 	:
-	: [feature] "i" (feature)
+	: [cpucap] "i" (cpucap)
 	:
 	: l_no);
@@ -242,15 +242,15 @@ alternative_has_feature_likely(const unsigned long feature)
 }
 
 static __always_inline bool
-alternative_has_feature_unlikely(const unsigned long feature)
+alternative_has_cap_unlikely(const unsigned long cpucap)
 {
-	compiletime_assert(feature < ARM64_NCAPS,
-			   "feature must be < ARM64_NCAPS");
+	compiletime_assert(cpucap < ARM64_NCAPS,
+			   "cpucap must be < ARM64_NCAPS");
 
 	asm_volatile_goto(
-	ALTERNATIVE("nop", "b %l[l_yes]", %[feature])
+	ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
 	:
-	: [feature] "i" (feature)
+	: [cpucap] "i" (cpucap)
 	:
 	: l_yes);
arch/arm64/include/asm/alternative.h
@@ -13,7 +13,7 @@
 struct alt_instr {
 	s32 orig_offset;	/* offset to original instruction */
 	s32 alt_offset;		/* offset to replacement instruction */
-	u16 cpufeature;		/* cpufeature bit set for replacement */
+	u16 cpucap;		/* cpucap bit set for replacement */
 	u8  orig_len;		/* size of original instruction(s) */
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
@@ -23,7 +23,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
 
 void __init apply_boot_alternatives(void);
 void __init apply_alternatives_all(void);
-bool alternative_is_applied(u16 cpufeature);
+bool alternative_is_applied(u16 cpucap);
 
 #ifdef CONFIG_MODULES
 void apply_alternatives_module(void *start, size_t length);
arch/arm64/include/asm/cpufeature.h
@@ -437,7 +437,7 @@ unsigned long cpu_get_elf_hwcap2(void);
 
 static __always_inline bool system_capabilities_finalized(void)
 {
-	return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
+	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
 }
 
 /*
@@ -464,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	return alternative_has_feature_unlikely(num);
+	return alternative_has_cap_unlikely(num);
 }
 
 /*
arch/arm64/include/asm/irqflags.h
@@ -24,7 +24,7 @@
 static __always_inline bool __irqflags_uses_pmr(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-	       alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
+	       alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
 }
 
 static __always_inline void __daif_local_irq_enable(void)
arch/arm64/include/asm/lse.h
@@ -18,7 +18,7 @@
 
 static __always_inline bool system_uses_lse_atomics(void)
 {
-	return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
+	return alternative_has_cap_likely(ARM64_HAS_LSE_ATOMICS);
 }
 
 #define __lse_ll_sc_body(op, ...) \
arch/arm64/kernel/alternative.c
@@ -24,8 +24,8 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
-#define ALT_CAP(a)		((a)->cpufeature & ~ARM64_CB_BIT)
-#define ALT_HAS_CB(a)		((a)->cpufeature & ARM64_CB_BIT)
+#define ALT_CAP(a)		((a)->cpucap & ~ARM64_CB_BIT)
+#define ALT_HAS_CB(a)		((a)->cpucap & ARM64_CB_BIT)
 
 /* Volatile, as we may be patching the guts of READ_ONCE() */
 static volatile int all_alternatives_applied;
@@ -37,12 +37,12 @@ struct alt_region {
 	struct alt_instr *end;
 };
 
-bool alternative_is_applied(u16 cpufeature)
+bool alternative_is_applied(u16 cpucap)
 {
-	if (WARN_ON(cpufeature >= ARM64_NCAPS))
+	if (WARN_ON(cpucap >= ARM64_NCAPS))
 		return false;
 
-	return test_bit(cpufeature, applied_alternatives);
+	return test_bit(cpucap, applied_alternatives);
 }
 
 /*
@@ -141,7 +141,7 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
 
 static void __apply_alternatives(const struct alt_region *region,
 				 bool is_module,
-				 unsigned long *feature_mask)
+				 unsigned long *cpucap_mask)
 {
 	struct alt_instr *alt;
 	__le32 *origptr, *updptr;
@@ -151,7 +151,7 @@ static void __apply_alternatives(const struct alt_region *region,
 	int nr_inst;
 	int cap = ALT_CAP(alt);
 
-	if (!test_bit(cap, feature_mask))
+	if (!test_bit(cap, cpucap_mask))
 		continue;
 
 	if (!cpus_have_cap(cap))
@@ -188,9 +188,8 @@ static void __apply_alternatives(const struct alt_region *region,
 	icache_inval_all_pou();
 	isb();
 
-	/* Ignore ARM64_CB bit from feature mask */
 	bitmap_or(applied_alternatives, applied_alternatives,
-		  feature_mask, ARM64_NCAPS);
+		  cpucap_mask, ARM64_NCAPS);
 	bitmap_and(applied_alternatives, applied_alternatives,
 		   system_cpucaps, ARM64_NCAPS);
 }
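
For reference, a hedged sketch of the calling convention described by the
updated usage comment above, modelled on the in-tree PAN toggling in the
uaccess code (the wrapper name here is hypothetical):

	/*
	 * "nop" executes by default; SET_PSTATE_PAN(1) is patched in on CPUs
	 * with the ARM64_HAS_PAN cpucap, and the whole block is omitted when
	 * CONFIG_ARM64_PAN is not selected.
	 */
	static inline void example_enable_pan(void)
	{
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));
	}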