Commit 052f1dc7 authored by Peter Zijlstra, committed by Ingo Molnar

sched: rt-group: make rt groups scheduling configurable

Make the rt group scheduler compile-time configurable.
Keep it experimental for now.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9f0c1e56
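
This change splits the old CONFIG_FAIR_GROUP_SCHED option into a small hierarchy (see the init/Kconfig hunk below): CONFIG_GROUP_SCHED is the umbrella option, CONFIG_FAIR_GROUP_SCHED now covers only SCHED_OTHER tasks, the new experimental CONFIG_RT_GROUP_SCHED covers SCHED_RR/SCHED_FIFO tasks, and the basis for grouping is a choice between CONFIG_USER_SCHED and CONFIG_CGROUP_SCHED. As an illustrative sketch (not shipped with the commit), a .config fragment enabling both class schedulers with user-id grouping would read:

	CONFIG_GROUP_SCHED=y
	CONFIG_FAIR_GROUP_SCHED=y
	CONFIG_EXPERIMENTAL=y
	CONFIG_RT_GROUP_SCHED=y
	CONFIG_USER_SCHED=y
	# CONFIG_CGROUP_SCHED is not set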
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -25,7 +25,7 @@ SUBSYS(ns)
 /* */
-#ifdef CONFIG_FAIR_CGROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 SUBSYS(cpu_cgroup)
 #endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -590,7 +590,7 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	uid_t uid;
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;
@@ -973,7 +973,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
 	struct rt_rq *rt_rq;
@@ -2027,19 +2027,22 @@ extern int sched_mc_power_savings, sched_smt_power_savings;
 extern void normalize_rt_tasks(void);
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
 extern struct task_group init_task_group;
 extern struct task_group *sched_create_group(void);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
+#ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 extern unsigned long sched_group_shares(struct task_group *tg);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				      long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+#endif
 #endif
 #ifdef CONFIG_TASK_XACCT
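
The sched.h hunk above leaves the generic group API (sched_create_group(), sched_destroy_group(), sched_move_task()) under the umbrella CONFIG_GROUP_SCHED while gating the shares and RT-runtime accessors separately. A minimal sketch of a hypothetical in-kernel caller (cap_group_rt() and the 950000us value are illustrative assumptions, not part of this commit):

	#include <linux/sched.h>

	#ifdef CONFIG_RT_GROUP_SCHED
	/* Cap a group's RT budget; compiles only when the RT group
	 * scheduler is configured in. */
	static int cap_group_rt(struct task_group *tg)
	{
		return sched_group_set_rt_runtime(tg, 950000 /* us, arbitrary */);
	}
	#else
	static int cap_group_rt(struct task_group *tg)
	{
		return 0;	/* no RT group scheduling: nothing to cap */
	}
	#endif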
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -311,25 +311,36 @@ config CPUSETS
 	  Say N if unsure.
-config FAIR_GROUP_SCHED
-	bool "Fair group CPU scheduler"
+config GROUP_SCHED
+	bool "Group CPU scheduler"
 	default y
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups.
+config FAIR_GROUP_SCHED
+	bool "Group scheduling for SCHED_OTHER"
+	depends on GROUP_SCHED
+	default y
+
+config RT_GROUP_SCHED
+	bool "Group scheduling for SCHED_RR/FIFO"
+	depends on EXPERIMENTAL
+	depends on GROUP_SCHED
+	default n
+
 choice
-	depends on FAIR_GROUP_SCHED
+	depends on GROUP_SCHED
 	prompt "Basis for grouping tasks"
-	default FAIR_USER_SCHED
+	default USER_SCHED
-config FAIR_USER_SCHED
+config USER_SCHED
 	bool "user id"
 	help
 	  This option will choose userid as the basis for grouping
 	  tasks, thus providing equal CPU bandwidth to each user.
-config FAIR_CGROUP_SCHED
+config CGROUP_SCHED
 	bool "Control groups"
 	depends on CGROUPS
 	help
The diff for kernel/sched.c (the bulk of this change) is collapsed and not shown here.
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,7 +55,7 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 	return !list_empty(&rt_se->run_list);
 }
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
@@ -177,7 +177,7 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 	if (rt_rq)
@@ -269,7 +269,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
 		rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -281,7 +281,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted++;
 #endif
@@ -293,7 +293,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_rq->rt_nr_running) {
 		struct rt_prio_array *array;
@@ -315,7 +315,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted--;
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -57,7 +57,7 @@ struct user_struct root_user = {
 	.uid_keyring	= &root_user_keyring,
 	.session_keyring = &root_session_keyring,
 #endif
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
 };
@@ -90,7 +90,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 	return NULL;
 }
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 static void sched_destroy_user(struct user_struct *up)
 {
@@ -113,15 +113,15 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }
-#else	/* CONFIG_FAIR_USER_SCHED */
+#else	/* CONFIG_USER_SCHED */
 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
 static void sched_switch_user(struct task_struct *p) { }
-#endif	/* CONFIG_FAIR_USER_SCHED */
+#endif	/* CONFIG_USER_SCHED */
-#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
@@ -137,6 +137,7 @@ static inline void uids_mutex_unlock(void)
 }
 /* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static ssize_t cpu_shares_show(struct kobject *kobj,
 			       struct kobj_attribute *attr,
 			       char *buf)
@@ -163,7 +164,9 @@ static ssize_t cpu_shares_store(struct kobject *kobj,
 static struct kobj_attribute cpu_share_attr =
 	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
 static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
 				   struct kobj_attribute *attr,
 				   char *buf)
@@ -190,11 +193,16 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
 static struct kobj_attribute cpu_rt_runtime_attr =
 	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+#endif
 /* default attributes per uid directory */
 static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	&cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
 	&cpu_rt_runtime_attr.attr,
+#endif
 	NULL
 };
@@ -297,7 +305,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	schedule_work(&up->work);
 }
-#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
+#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
 int uids_sysfs_init(void) { return 0; }
 static inline int uids_user_create(struct user_struct *up) { return 0; }
@@ -401,7 +409,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
-		/* This case is not possible when CONFIG_FAIR_USER_SCHED
+		/* This case is not possible when CONFIG_USER_SCHED
 		 * is defined, since we serialize alloc_uid() using
 		 * uids_mutex. Hence no need to call
 		 * sched_destroy_user() or remove_user_sysfs_dir().
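
The kernel/user.c hunks apply the same split to the per-uid sysfs attributes: under /sys/kernel/uids/<uid>/, cpu_share appears only with CONFIG_FAIR_GROUP_SCHED and cpu_rt_runtime only with CONFIG_RT_GROUP_SCHED (both also require CONFIG_USER_SCHED and CONFIG_SYSFS). A minimal userspace sketch that probes both files; uid 1000 is a hypothetical example:

	#include <stdio.h>

	/* Print one sysfs attribute, or report it as absent (the file
	 * is missing when the corresponding config option is off). */
	static void show_attr(const char *path)
	{
		char buf[64];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);
		fclose(f);
	}

	int main(void)
	{
		show_attr("/sys/kernel/uids/1000/cpu_share");      /* CONFIG_FAIR_GROUP_SCHED */
		show_attr("/sys/kernel/uids/1000/cpu_rt_runtime"); /* CONFIG_RT_GROUP_SCHED */
		return 0;
	}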