Commit 546a3fee authored by Peter Zijlstra's avatar Peter Zijlstra

sched: Reverse sched_class layout

Because GCC-12 is fully stupid about array bounds and it's just really
hard to get a solid array definition from a linker script, flip the
array order to avoid needing negative offsets :-/

This makes the whole relational pointer magic a little less obvious, but
alas.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/YoOLLmLG7HRTXeEm@hirez.programming.kicks-ass.net
parent 734387ec
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -126,13 +126,13 @@
  */
 #define SCHED_DATA				\
 	STRUCT_ALIGN();				\
-	__begin_sched_classes = .;		\
-	*(__idle_sched_class)			\
-	*(__fair_sched_class)			\
-	*(__rt_sched_class)			\
-	*(__dl_sched_class)			\
-	*(__stop_sched_class)			\
-	__end_sched_classes = .;
+	__sched_class_highest = .;		\
+	*(__stop_sched_class)			\
+	*(__dl_sched_class)			\
+	*(__rt_sched_class)			\
+	*(__fair_sched_class)			\
+	*(__idle_sched_class)			\
+	__sched_class_lowest = .;
 
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
......
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2193,7 +2193,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->sched_class == rq->curr->sched_class)
 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-	else if (p->sched_class > rq->curr->sched_class)
+	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
 		resched_curr(rq);
/* /*
@@ -5692,7 +5692,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * higher scheduling class, because otherwise those lose the
 	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(prev->sched_class <= &fair_sched_class &&
+	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 
 		p = pick_next_task_fair(rq, prev, rf);
@@ -9472,11 +9472,11 @@ void __init sched_init(void)
 	int i;
 
 	/* Make sure the linker didn't screw up */
-	BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
-	       &fair_sched_class + 1 != &rt_sched_class ||
-	       &rt_sched_class + 1   != &dl_sched_class);
+	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
+	       &fair_sched_class != &rt_sched_class + 1 ||
+	       &rt_sched_class   != &dl_sched_class + 1);
 #ifdef CONFIG_SMP
-	BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
+	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
 	wait_bit_init();
......
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2177,6 +2177,8 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
  *
  *   include/asm-generic/vmlinux.lds.h
  *
+ * *CAREFUL* they are laid out in *REVERSE* order!!!
+ *
  * Also enforce alignment on the instance, not the type, to guarantee layout.
  */
 #define DEFINE_SCHED_CLASS(name) \
@@ -2185,17 +2187,16 @@ const struct sched_class name##_sched_class \
 	__section("__" #name "_sched_class")
 
 /* Defined in include/asm-generic/vmlinux.lds.h */
-extern struct sched_class __begin_sched_classes[];
-extern struct sched_class __end_sched_classes[];
-
-#define sched_class_highest (__end_sched_classes - 1)
-#define sched_class_lowest  (__begin_sched_classes - 1)
+extern struct sched_class __sched_class_highest[];
+extern struct sched_class __sched_class_lowest[];
 
 #define for_class_range(class, _from, _to) \
-	for (class = (_from); class != (_to); class--)
+	for (class = (_from); class < (_to); class++)
 
 #define for_each_class(class) \
-	for_class_range(class, sched_class_highest, sched_class_lowest)
+	for_class_range(class, __sched_class_highest, __sched_class_lowest)
+
+#define sched_class_above(_a, _b)	((_a) < (_b))
 
 extern const struct sched_class stop_sched_class;
 extern const struct sched_class dl_sched_class;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment