Commit 99cf983c authored by Mark Rutland, committed by Peter Zijlstra

sched/preempt: Add PREEMPT_DYNAMIC using static keys

Where an architecture selects HAVE_STATIC_CALL but not
HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline
which will either branch to a callee or return to the caller.

On such architectures, a number of constraints can conspire to make
those trampolines more complicated and potentially less useful than we'd
like. For example:

* Hardware and software control flow integrity schemes can require the
  addition of "landing pad" instructions (e.g. `BTI` for arm64), which
  will also be present at the "real" callee.

* Limited branch ranges can require that trampolines generate or load an
  address into a register and perform an indirect branch (or at least
  have a slow path that does so). This loses some of the benefits of
  having a direct branch.

* Interaction with SW CFI schemes can be complicated and fragile, e.g.
  requiring that we can recognise idiomatic codegen and remove the
  indirections it introduces, at least until clang provides more helpful
  mechanisms for dealing with this.

For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we
really only need to enable/disable specific preemption functions. We can
achieve the same effect without a number of the pain points above by
using static keys to fold early returns into the preemption functions
themselves rather than in an out-of-line trampoline, effectively
inlining the trampoline into the start of the function.
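
In C terms, each preemption function open-codes this early return behind a
static branch. For example, the cond_resched() flavour added later in this
patch reduces to:

| static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
|
| int __sched dynamic_cond_resched(void)
| {
|         /*
|          * When the key is disabled, the branch is patched to a NOP and
|          * we return 0 immediately; when enabled, we fall through to the
|          * real implementation.
|          */
|         if (!static_branch_unlikely(&sk_dynamic_cond_resched))
|                 return 0;
|         return __cond_resched();
| }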

For arm64, this results in good code generation. For example, the
dynamic_cond_resched() wrapper looks as follows when enabled. When
disabled, the first `B` is replaced with a `NOP`, resulting in an early
return.

| <dynamic_cond_resched>:
|        bti     c
|        b       <dynamic_cond_resched+0x10>     // or `nop`
|        mov     w0, #0x0
|        ret
|        mrs     x0, sp_el0
|        ldr     x0, [x0, #8]
|        cbnz    x0, <dynamic_cond_resched+0x8>
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

... compared to the regular form of the function:

| <__cond_resched>:
|        bti     c
|        mrs     x0, sp_el0
|        ldr     x1, [x0, #8]
|        cbz     x1, <__cond_resched+0x18>
|        mov     w0, #0x0
|        ret
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

Any architecture which implements static keys should be able to use this
to implement PREEMPT_DYNAMIC with similar cost to non-inlined static
calls. Since this is likely to have greater overhead than (inlined)
static calls, PREEMPT_DYNAMIC is enabled by default only when
HAVE_PREEMPT_DYNAMIC_CALL is selected.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
parent 33c64734
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1278,11 +1278,41 @@ config HAVE_STATIC_CALL_INLINE
 
 config HAVE_PREEMPT_DYNAMIC
 	bool
+
+config HAVE_PREEMPT_DYNAMIC_CALL
+	bool
 	depends on HAVE_STATIC_CALL
+	select HAVE_PREEMPT_DYNAMIC
+	help
+	   An architecture should select this if it can handle the preemption
+	   model being selected at boot time using static calls.
+
+	   Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+	   preemption function will be patched directly.
+
+	   Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+	   call to a preemption function will go through a trampoline, and the
+	   trampoline will be patched.
+
+	   It is strongly advised to support inline static call to avoid any
+	   overhead.
+
+config HAVE_PREEMPT_DYNAMIC_KEY
+	bool
+	depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+	select HAVE_PREEMPT_DYNAMIC
 	help
-	   Select this if the architecture support boot time preempt setting
-	   on top of static calls. It is strongly advised to support inline
-	   static call to avoid any overhead.
+	   An architecture should select this if it can handle the preemption
+	   model being selected at boot time using static keys.
+
+	   Each preemption function will be given an early return based on a
+	   static key. This should have slightly lower overhead than non-inline
+	   static calls, as this effectively inlines each trampoline into the
+	   start of its callee. This may avoid redundant work, and may
+	   integrate better with CFI schemes.
+
+	   This will have greater overhead than using inline static calls as
+	   the call to the preemption function cannot be entirely elided.
 
 config ARCH_WANT_LD_ORPHAN_WARN
 	bool
......
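
For illustration, an architecture that already implements jump labels can
opt in with a single select. This is not part of this commit; the arm64
enablement in a follow-up patch amounts to:

    config ARM64
    	...
    	select HAVE_PREEMPT_DYNAMIC_KEY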
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,7 +245,7 @@ config X86
 	select HAVE_STACK_VALIDATION		if X86_64
 	select HAVE_STATIC_CALL
 	select HAVE_STATIC_CALL_INLINE		if HAVE_STACK_VALIDATION
-	select HAVE_PREEMPT_DYNAMIC
+	select HAVE_PREEMPT_DYNAMIC_CALL
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
......
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -456,13 +456,19 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
  */
 void raw_irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
 #define irqentry_exit_cond_resched_dynamic_disabled	NULL
 DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
 #define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
-#else
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
 #define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
-#endif
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 
 /**
  * irqentry_exit - Handle return from exception that used irqentry_enter()
......
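
For context, the call site in irqentry_exit() is untouched by this patch; it
invokes whichever form the macro resolves to (sketch of the pre-existing
caller in kernel/entry/common.c):

    	/* Covers both the static-call and static-key configurations. */
    	if (IS_ENABLED(CONFIG_PREEMPTION))
    		irqentry_exit_cond_resched();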
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -99,7 +99,7 @@ struct user;
 extern int __cond_resched(void);
 # define might_resched() __cond_resched()
 
-#elif defined(CONFIG_PREEMPT_DYNAMIC)
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 
 extern int __cond_resched(void);
 
@@ -110,6 +110,11 @@ static __always_inline void might_resched(void)
 	static_call_mod(might_resched)();
 }
 
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
+
 #else
 
 # define might_resched() do { } while (0)
......
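
might_resched() is the hook behind might_sleep(), so this choice decides
whether the debug sleep checks also reschedule. Roughly, from the same
header (simplified sketch, not part of this diff):

    # define might_sleep() \
    	do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)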
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2020,7 +2020,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
 extern int __cond_resched(void);
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 
 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
 
@@ -2029,6 +2029,14 @@ static __always_inline int _cond_resched(void)
 	return static_call_mod(cond_resched)();
 }
 
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+	return dynamic_cond_resched();
+}
+
 #else
 
 static inline int _cond_resched(void)
......
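
Callers are unaffected either way: cond_resched() still expands to
_cond_resched() behind the usual debug check, roughly (simplified sketch,
not part of this diff):

    #define cond_resched() ({			\
    	__might_resched(__FILE__, __LINE__, 0);	\
    	_cond_resched();			\
    })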
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -96,8 +96,9 @@ config PREEMPTION
 config PREEMPT_DYNAMIC
 	bool "Preemption behaviour defined on boot"
 	depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
+	select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
 	select PREEMPT_BUILD
-	default y
+	default y if HAVE_PREEMPT_DYNAMIC_CALL
 	help
 	  This option allows to define the preemption model on the kernel
 	  command line parameter and thus override the default preemption
......
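
Whichever mechanism backs it, the model is then chosen at boot via the
existing command-line parameter, e.g.:

    preempt=none
    preempt=voluntary
    preempt=full

On kernels with the sched debugfs knob it can also be flipped at runtime
through /sys/kernel/debug/sched/preempt.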
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -3,6 +3,7 @@
 #include <linux/context_tracking.h>
 #include <linux/entry-common.h>
 #include <linux/highmem.h>
+#include <linux/jump_label.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
 #include <linux/tick.h>
@@ -392,7 +393,17 @@ void raw_irqentry_exit_cond_resched(void)
 	}
 }
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+		return;
+	raw_irqentry_exit_cond_resched();
+}
+#endif
 #endif
 
 noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
......
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -14,6 +14,7 @@
 
 #include <linux/nospec.h>
 #include <linux/blkdev.h>
+#include <linux/jump_label.h>
 #include <linux/kcov.h>
 #include <linux/scs.h>
@@ -6484,21 +6485,31 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 	 */
 	if (likely(!preemptible()))
 		return;
 
 	preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #ifndef preempt_schedule_dynamic_enabled
 #define preempt_schedule_dynamic_enabled	preempt_schedule
 #define preempt_schedule_dynamic_disabled	NULL
 #endif
 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
+void __sched notrace dynamic_preempt_schedule(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
+		return;
+	preempt_schedule();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
+EXPORT_SYMBOL(dynamic_preempt_schedule);
+#endif
 #endif
 
 /**
  * preempt_schedule_notrace - preempt_schedule called by tracing
@@ -6553,12 +6564,24 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #ifndef preempt_schedule_notrace_dynamic_enabled
 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
 #define preempt_schedule_notrace_dynamic_disabled	NULL
 #endif
 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
+void __sched notrace dynamic_preempt_schedule_notrace(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
+		return;
+	preempt_schedule_notrace();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
+#endif
 #endif
 
 #endif /* CONFIG_PREEMPTION */
@@ -8068,6 +8091,7 @@ EXPORT_SYMBOL(__cond_resched);
 #endif
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #define cond_resched_dynamic_enabled	__cond_resched
 #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
@@ -8077,6 +8101,25 @@ EXPORT_STATIC_CALL_TRAMP(cond_resched);
 #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(might_resched);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
+int __sched dynamic_cond_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
+		return 0;
+	return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_cond_resched);
+
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
+int __sched dynamic_might_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
+		return 0;
+	return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_might_resched);
+#endif
 #endif
 
 /*
@@ -8206,8 +8249,15 @@ int sched_dynamic_mode(const char *str)
 	return -EINVAL;
 }
 
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
+#else
+#error "Unsupported PREEMPT_DYNAMIC mechanism"
+#endif
 
 void sched_dynamic_update(int mode)
 {
......
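
Both pairs of macros are consumed by the pre-existing sched_dynamic_update(),
which stays mechanism-agnostic. Condensed for illustration (elisions marked;
the full function handles the voluntary and full modes the same way):

    void sched_dynamic_update(int mode)
    {
    	/*
    	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
    	 * the ZERO state, which is invalid.
    	 */
    	preempt_dynamic_enable(cond_resched);
    	preempt_dynamic_enable(might_resched);
    	preempt_dynamic_enable(preempt_schedule);
    	preempt_dynamic_enable(preempt_schedule_notrace);
    	preempt_dynamic_enable(irqentry_exit_cond_resched);

    	switch (mode) {
    	case preempt_dynamic_none:
    		preempt_dynamic_enable(cond_resched);
    		preempt_dynamic_disable(might_resched);
    		preempt_dynamic_disable(preempt_schedule);
    		preempt_dynamic_disable(preempt_schedule_notrace);
    		preempt_dynamic_disable(irqentry_exit_cond_resched);
    		pr_info("Dynamic Preempt: none\n");
    		break;
    	/* ... preempt_dynamic_voluntary, preempt_dynamic_full ... */
    	}
    }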