Commit bf5c91ba authored by Ingo Molnar

sched: move sched_feat() definitions

Move the sched_feat() definitions so that they are available earlier and can be used by generic scheduler code too.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent e9acbff6
@@ -381,6 +381,37 @@ static void update_rq_clock(struct rq *rq)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif
/*
* Debugging: various feature bits
*/
enum {
	SCHED_FEAT_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
	SCHED_FEAT_SLEEPER_AVG		= 4,
	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
	SCHED_FEAT_START_DEBIT		= 16,
	SCHED_FEAT_SKIP_INITIAL		= 32,
};

const_debug unsigned int sysctl_sched_features =
		SCHED_FEAT_FAIR_SLEEPERS	*0 |
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_SLEEPER_AVG		*0 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_SKIP_INITIAL		*0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
...
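To see what the moved block provides, here is a stand-alone, user-space sketch of the same feature-mask pattern. The names mirror the kernel code above, but the main() call site and the dropped const_debug/__read_mostly annotations are purely illustrative and not part of this commit:

#include <stdio.h>

/* Feature bits: each scheduler feature occupies one bit of the mask. */
enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
	SCHED_FEAT_START_DEBIT		= 16,
	SCHED_FEAT_SKIP_INITIAL		= 32,
};

/* Multiplying each bit by 0 or 1 keeps the per-feature default readable. */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_SKIP_INITIAL		*0;

/* Token pasting turns sched_feat(START_DEBIT) into a single mask test. */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	if (sched_feat(START_DEBIT))
		printf("START_DEBIT is enabled\n");
	if (!sched_feat(SKIP_INITIAL))
		printf("SKIP_INITIAL is disabled\n");
	return 0;
}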
@@ -20,15 +20,6 @@
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
@@ -87,28 +78,6 @@ const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int sysctl_sched_runtime_limit __read_mostly;
/*
* Debugging: various feature bits
*/
enum {
	SCHED_FEAT_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
	SCHED_FEAT_SLEEPER_AVG		= 4,
	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
	SCHED_FEAT_START_DEBIT		= 16,
	SCHED_FEAT_SKIP_INITIAL		= 32,
};

const_debug unsigned int sysctl_sched_features =
		SCHED_FEAT_FAIR_SLEEPERS	*0 |
		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
		SCHED_FEAT_SLEEPER_AVG		*0 |
		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
		SCHED_FEAT_START_DEBIT		*1 |
		SCHED_FEAT_SKIP_INITIAL		*0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
extern struct sched_class fair_sched_class;

/**************************************************************
...
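The const_debug helper that moves along with these bits deserves a brief illustration: with CONFIG_SCHED_DEBUG set, the tunables remain ordinary runtime-writable variables; otherwise they collapse into file-local compile-time constants. Below is a minimal user-space sketch of that switch (not kernel code); __read_mostly has no user-space equivalent, so it is replaced by an empty annotation, and the mask value simply mirrors the defaults added in the first hunk:

#include <stdio.h>

/* Stand-in for the kernel's CONFIG_SCHED_DEBUG Kconfig option. */
#define CONFIG_SCHED_DEBUG 1

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debug builds: the tunable stays a plain, runtime-writable variable
 * (the kernel additionally tags it __read_mostly for cache placement).
 */
# define const_debug
#else
/*
 * Non-debug builds: the tunable becomes a file-local compile-time
 * constant, so sched_feat() tests can be folded away by the compiler.
 */
# define const_debug static const
#endif

/* 2 | 8 | 16 mirrors the default feature mask shown above. */
const_debug unsigned int sysctl_sched_features = 2 | 8 | 16;

int main(void)
{
	printf("sched_features mask: 0x%x\n", sysctl_sched_features);
	return 0;
}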