Commit 2bd8e6d4 authored by Ingo Molnar

sched: use constants if !CONFIG_SCHED_DEBUG

Use constants instead of sysctl-tunable variables if !CONFIG_SCHED_DEBUG.

This speeds up the code and reduces code size:

    text    data     bss     dec     hex filename
   27464    3014      16   30494    771e sched.o.before
   26929    3010      20   29959    7507 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 38ad464d
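Illustration (editor's sketch, not part of the patch): a tunable declared with the new const_debug annotation stays a writable, sysctl-exported global when CONFIG_SCHED_DEBUG=y, but collapses to a static const object when CONFIG_SCHED_DEBUG=n, so the compiler can fold the value into its users. The const_debug macro and the sysctl_sched_latency name mirror the patch below; the file name, the empty __read_mostly stub and the main() harness are hypothetical, added only so the sketch builds in user space.

/* const_debug_demo.c - hypothetical user-space sketch (not kernel code). */
#include <stdio.h>

/* Stand-in for the kernel's __read_mostly section annotation. */
#define __read_mostly

/* Tunables that become constants when CONFIG_SCHED_DEBUG is off
 * (same macro the patch adds): */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly      /* remains a writable global */
#else
# define const_debug static const      /* becomes a compile-time constant */
#endif

const_debug unsigned int sysctl_sched_latency = 20000000ULL;

int main(void)
{
        /* With !CONFIG_SCHED_DEBUG the compiler can substitute the constant
         * at each use and drop the variable, shrinking text and data. */
        printf("sched latency: %u ns\n", sysctl_sched_latency);
        return 0;
}

Building it as "gcc -O2 -DCONFIG_SCHED_DEBUG const_debug_demo.c" versus "gcc -O2 const_debug_demo.c" and comparing the size(1) output should show, in miniature, the same kind of text/data shrinkage that the numbers above report for sched.o.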
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1402,15 +1402,18 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_stat_granularity;
 extern unsigned int sysctl_sched_runtime_limit;
-extern unsigned int sysctl_sched_compat_yield;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
+#endif
+
+extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1658,12 +1658,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	put_cpu();
 }
 
-/*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
- */
-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
-
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
  *
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -20,6 +20,15 @@
  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
+/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 20ms, units: nanoseconds)
@@ -34,7 +43,13 @@
  * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
  * Targeted preemption latency for CPU-bound tasks:
  */
-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
+const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
@@ -58,7 +73,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -68,13 +83,10 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-unsigned int sysctl_sched_stat_granularity __read_mostly;
+const_debug unsigned int sysctl_sched_stat_granularity;
 
-/*
- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
- */
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -89,7 +101,7 @@ enum {
 	SCHED_FEAT_SKIP_INITIAL		= 32,
 };
 
-unsigned int sysctl_sched_features __read_mostly =
+const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |