Commit 6d6bc0ad authored by Dhaval Giani, committed by Ingo Molnar

sched: add comments for ifdefs in sched.c

Make sched.c easier to read.
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e21f5b15
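The point of the patch, spelled out: sched.c nests several Kconfig-dependent blocks, and a bare #endif can sit a long way from the #ifdef it closes. Annotating each #else/#endif with the condition it terminates (negated on the #else branch, since that branch compiles when the condition is false) lets a reader match the pairs without scrolling back. A minimal sketch of the convention follows; the CONFIG_EXAMPLE_* symbols are hypothetical, used only for illustration:

#ifdef CONFIG_EXAMPLE_GROUP_SCHED
# ifdef CONFIG_EXAMPLE_USER_SCHED
/* per-user task groups: give the default group twice the weight */
#  define EXAMPLE_GROUP_LOAD	(2 * NICE_0_LOAD)
# else  /* !CONFIG_EXAMPLE_USER_SCHED */
/* one default group only: plain NICE_0_LOAD weight */
#  define EXAMPLE_GROUP_LOAD	NICE_0_LOAD
# endif /* CONFIG_EXAMPLE_USER_SCHED */
#else   /* !CONFIG_EXAMPLE_GROUP_SCHED */
# define EXAMPLE_GROUP_LOAD	NICE_0_LOAD
#endif  /* CONFIG_EXAMPLE_GROUP_SCHED */

The generated code is unchanged; the comments exist purely for the reader, which is why every hunk below touches only #else and #endif lines.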
@@ -292,15 +292,15 @@ struct task_group root_task_group;
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED */
 #define root_task_group init_task_group
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 /* task_group_lock serializes add/remove of task groups and also changes to
  * a task group's cpu shares.
@@ -310,9 +310,9 @@ static DEFINE_SPINLOCK(task_group_lock);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
-#else
+#else /* !CONFIG_USER_SCHED */
 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
-#endif
+#endif /* CONFIG_USER_SCHED */
 /*
  * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
@@ -1316,15 +1316,15 @@ void wake_up_idle_cpu(int cpu)
 	if (!tsk_is_polling(rq->idle))
 		smp_send_reschedule(cpu);
 }
-#endif
+#endif /* CONFIG_NO_HZ */
-#else
+#else /* !CONFIG_SMP */
 static void __resched_task(struct task_struct *p, int tif_bit)
 {
 	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_thread_flag(p, tif_bit);
 }
-#endif
+#endif /* CONFIG_SMP */
 #if BITS_PER_LONG == 32
 # define WMULT_CONST (~0UL)
@@ -2129,7 +2129,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 			}
 		}
 	}
-#endif
+#endif /* CONFIG_SCHEDSTATS */
 out_activate:
 #endif /* CONFIG_SMP */
@@ -2329,7 +2329,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 		notifier->ops->sched_out(notifier, next);
 }
-#else
+#else /* !CONFIG_PREEMPT_NOTIFIERS */
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
@@ -2341,7 +2341,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 {
 }
-#endif
+#endif /* CONFIG_PREEMPT_NOTIFIERS */
 /**
  * prepare_task_switch - prepare to switch tasks
@@ -6300,9 +6300,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 	}
 	kfree(groupmask);
 }
-#else
+#else /* !CONFIG_SCHED_DEBUG */
 # define sched_domain_debug(sd, cpu) do { } while (0)
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
 static int sd_degenerate(struct sched_domain *sd)
 {
@@ -6598,7 +6598,7 @@ static void sched_domain_node_span(int node, cpumask_t *span)
 		cpus_or(*span, *span, *nodemask);
 	}
 }
-#endif
+#endif /* CONFIG_NUMA */
 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
@@ -6617,7 +6617,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 		*sg = &per_cpu(sched_group_cpus, cpu);
 	return cpu;
 }
-#endif
+#endif /* CONFIG_SCHED_SMT */
 /*
  * multi-core sched-domains:
@@ -6625,7 +6625,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_core);
-#endif
+#endif /* CONFIG_SCHED_MC */
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
@@ -6727,7 +6727,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 		sg = sg->next;
 	} while (sg != group_head);
 }
-#endif
+#endif /* CONFIG_NUMA */
 #ifdef CONFIG_NUMA
 /* Free memory allocated for various sched_group structures */
@@ -6764,11 +6764,11 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		sched_group_nodes_bycpu[cpu] = NULL;
 	}
 }
-#else
+#else /* !CONFIG_NUMA */
 static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 }
-#endif
+#endif /* CONFIG_NUMA */
 /*
  * Initialize sched groups cpu_power.
@@ -7459,7 +7459,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 #endif
 	return err;
 }
-#endif
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 /*
  * Force a reinitialization of the sched domains hierarchy. The domains
@@ -7677,8 +7677,8 @@ void __init sched_init(void)
 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
-#endif
+#endif /* CONFIG_USER_SCHED */
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
@@ -7692,8 +7692,8 @@ void __init sched_init(void)
 		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
-#endif
+#endif /* CONFIG_USER_SCHED */
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 	}
 #ifdef CONFIG_SMP
@@ -7709,8 +7709,8 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), RUNTIME_INF);
-#endif
+#endif /* CONFIG_USER_SCHED */
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_GROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
@@ -7720,8 +7720,8 @@ void __init sched_init(void)
 	INIT_LIST_HEAD(&root_task_group.children);
 	init_task_group.parent = &root_task_group;
 	list_add(&init_task_group.siblings, &root_task_group.children);
-#endif
+#endif /* CONFIG_USER_SCHED */
-#endif
+#endif /* CONFIG_GROUP_SCHED */
 	for_each_possible_cpu(i) {
 		struct rq *rq;
@@ -8040,7 +8040,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
 }
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
 }
@@ -8058,7 +8058,7 @@ static inline void register_fair_sched_group(struct task_group *tg, int cpu)
 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 static void free_rt_sched_group(struct task_group *tg)
@@ -8129,7 +8129,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 {
 	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
 }
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 static inline void free_rt_sched_group(struct task_group *tg)
 {
 }
@@ -8147,7 +8147,7 @@ static inline void register_rt_sched_group(struct task_group *tg, int cpu)
 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 {
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_GROUP_SCHED
 static void free_sched_group(struct task_group *tg)
@@ -8258,7 +8258,7 @@ void sched_move_task(struct task_struct *tsk)
 	task_rq_unlock(rq, &flags);
 }
-#endif
+#endif /* CONFIG_GROUP_SCHED */
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -8508,7 +8508,7 @@ static int sched_rt_global_constraints(void)
 	return ret;
 }
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
 	unsigned long flags;
@@ -8526,7 +8526,7 @@ static int sched_rt_global_constraints(void)
 	return 0;
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 int sched_rt_handler(struct ctl_table *table, int write,
 		struct file *filp, void __user *buffer, size_t *lenp,
@@ -8634,7 +8634,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 	return (u64) tg->shares;
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
@@ -8658,7 +8658,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
 {
 	return sched_group_rt_period(cgroup_tg(cgrp));
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 static struct cftype cpu_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
...