Commit c09d6a04 authored by Will Deacon

arm64: atomics: patch in lse instructions when supported by the CPU

On CPUs which support the LSE atomic instructions introduced in ARMv8.1,
it makes sense to use them in preference to ll/sc sequences.

This patch introduces runtime patching of atomic_t and atomic64_t
routines so that the call-site for the out-of-line ll/sc sequences is
patched with an LSE atomic instruction when we detect that
the CPU supports it.

If binutils is not recent enough to assemble the LSE instructions, then
the ll/sc sequences are inlined as though CONFIG_ARM64_LSE_ATOMICS=n.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent c0385b24
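
Before the diffs, a minimal sketch of what a patched call-site looks like may help. It is modelled on the ARM64_LSE_ATOMIC_INSN and __LL_SC_CALL helpers added in asm/lse.h below, using atomic_add as the example; the real definitions live in the collapsed atomic_lse.h diff, so the exact body here is an assumption, not the patch's code.

/*
 * Sketch of a patched call-site (illustrative). The default instruction
 * stream is a branch to the out-of-line ll/sc routine; on CPUs with LSE
 * atomics, the alternatives framework rewrites it at boot into a single
 * STADD. Arguments are pinned to w0/x1 so the out-of-line call sees them
 * in the AAPCS argument registers.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC path: bl __ll_sc_atomic_add */
	__LL_SC_CALL(atomic_add),
	/* LSE path: patched in at runtime */
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");	/* bl clobbers the link register */
}

Both sequences are one instruction (4 bytes), which is what lets ALTERNATIVE() substitute one for the other in place.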
arch/arm64/Makefile:

@@ -17,7 +17,18 @@ GZFLAGS		:=-9

 KBUILD_DEFCONFIG := defconfig

-KBUILD_CFLAGS	+= -mgeneral-regs-only
+# Check for binutils support for specific extensions
+lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
+
+ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
+  ifeq ($(lseinstr),)
+$(warning LSE atomics not supported by binutils)
+  endif
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 AS		+= -EB
arch/arm64/include/asm/atomic.h:

@@ -21,11 +21,11 @@
 #define __ASM_ATOMIC_H

 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <linux/types.h>

 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
+#include <asm/lse.h>

 #define ATOMIC_INIT(i)	{ (i) }
@@ -33,7 +33,7 @@
 #define __ARM64_IN_ATOMIC_IMPL

-#ifdef CONFIG_ARM64_LSE_ATOMICS
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
 #include <asm/atomic_lse.h>
 #else
 #include <asm/atomic_ll_sc.h>
 #endif
arch/arm64/include/asm/atomic_ll_sc.h:

@@ -37,18 +37,6 @@
  * (the optimize attribute silently ignores these options).
  */

-#ifndef __LL_SC_INLINE
-#define __LL_SC_INLINE		static inline
-#endif
-
-#ifndef __LL_SC_PREFIX
-#define __LL_SC_PREFIX(x)	x
-#endif
-
-#ifndef __LL_SC_EXPORT
-#define __LL_SC_EXPORT(x)
-#endif
-
 #define ATOMIC_OP(op, asm_op)					\
 __LL_SC_INLINE void						\
 __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))			\
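
For reference, here is roughly what the ATOMIC_OP() template above produces for op = add once the out-of-line definitions from asm/lse.h (below) are in effect. The loop body is the canonical exclusive-load/store sequence used by this file; treat it as a sketch rather than the literal patch text.

__LL_SC_INLINE void					/* expands to nothing: not inline   */
__LL_SC_PREFIX(atomic_add(int i, atomic_t *v))		/* becomes __ll_sc_atomic_add(...) */
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"			/* load-exclusive v->counter        */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"			/* store-exclusive, w1 = status     */
"	cbnz	%w1, 1b"			/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}
__LL_SC_EXPORT(atomic_add);	/* EXPORT_SYMBOL(__ll_sc_atomic_add) */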
(The diff for the new file arch/arm64/include/asm/atomic_lse.h is collapsed in this view.)
arch/arm64/include/asm/lse.h (new file):

#ifndef __ASM_LSE_H
#define __ASM_LSE_H

#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)

#include <linux/stringify.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>

__asm__(".arch_extension	lse");

/* Move the ll/sc atomics out-of-line */
#define __LL_SC_INLINE
#define __LL_SC_PREFIX(x)	__ll_sc_##x
#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))

/* Macro for constructing calls to out-of-line ll/sc atomics */
#define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"

/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
	ALTERNATIVE(llsc, lse, ARM64_CPU_FEAT_LSE_ATOMICS)

#else

#define __LL_SC_INLINE		static inline
#define __LL_SC_PREFIX(x)	x
#define __LL_SC_EXPORT(x)

#define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc

#endif	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
#endif	/* __ASM_LSE_H */
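
As a quick usage note, __LL_SC_CALL builds the default instruction text by token-pasting the __ll_sc_ prefix onto the routine name and stringifying the result. The snippet below is a standalone userspace demo (an assumption for illustration, not part of the patch) that restates the two macros so the expansion can be printed:

#include <stdio.h>

/* Standalone re-statement of the macros from asm/lse.h above */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)
#define __LL_SC_PREFIX(x)	__ll_sc_##x
#define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"

int main(void)
{
	/* Prints: bl	__ll_sc_atomic_add */
	printf("%s", __LL_SC_CALL(atomic_add));
	return 0;
}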
arch/arm64/kernel/setup.c:

@@ -285,6 +285,9 @@ static void __init setup_processor(void)
 	case 2:
 		elf_hwcap |= HWCAP_ATOMICS;
 		cpus_set_cap(ARM64_CPU_FEAT_LSE_ATOMICS);
+		if (IS_ENABLED(CONFIG_AS_LSE) &&
+		    IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS))
+			pr_info("LSE atomics supported\n");
 	case 1:
 		/* RESERVED */
 	case 0:
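
The case values in the switch above come from the CPU ID registers. As context (an assumption based on the surrounding setup_processor() code, which the diff does not show in full), the value being switched on is the Atomic field of ID_AA64ISAR0_EL1, where the architected value 2 indicates that the ARMv8.1 LSE instructions are implemented. A hedged sketch of the detection read:

/*
 * The Atomic field occupies bits [23:20] of ID_AA64ISAR0_EL1;
 * 0b0010 means the LSE atomic instructions are implemented.
 */
static bool cpu_has_lse_atomics(void)
{
	u64 isar0;

	asm("mrs	%0, id_aa64isar0_el1" : "=r" (isar0));
	return ((isar0 >> 20) & 0xf) == 2;
}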