Commit fa717060 authored by Peter Zijlstra, committed by Ingo Molnar

sched: sched_rt_entity

Move the task_struct members specific to rt scheduling together.
A future optimization could be to put sched_entity and sched_rt_entity
into a union.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8eb703e4
...@@ -133,9 +133,10 @@ extern struct group_info init_groups; ...@@ -133,9 +133,10 @@ extern struct group_info init_groups;
.nr_cpus_allowed = NR_CPUS, \ .nr_cpus_allowed = NR_CPUS, \
.mm = NULL, \ .mm = NULL, \
.active_mm = &init_mm, \ .active_mm = &init_mm, \
.run_list = LIST_HEAD_INIT(tsk.run_list), \ .rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, }, \
.ioprio = 0, \ .ioprio = 0, \
.time_slice = HZ, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \ .tasks = LIST_HEAD_INIT(tsk.tasks), \
.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
.ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
......
...@@ -929,6 +929,11 @@ struct sched_entity { ...@@ -929,6 +929,11 @@ struct sched_entity {
#endif #endif
}; };
/*
 * Per-task scheduling state specific to the real-time (RT) scheduling
 * class, split out of task_struct so the RT fields live together.
 * Per the commit message, a future optimization could place this in a
 * union with struct sched_entity.
 */
struct sched_rt_entity {
struct list_head run_list;	/* node on the rq's per-priority RT run queue */
unsigned int time_slice;	/* remaining ticks; decremented each tick for SCHED_RR tasks */
};
struct task_struct { struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack; void *stack;
...@@ -945,9 +950,9 @@ struct task_struct { ...@@ -945,9 +950,9 @@ struct task_struct {
#endif #endif
int prio, static_prio, normal_prio; int prio, static_prio, normal_prio;
struct list_head run_list;
const struct sched_class *sched_class; const struct sched_class *sched_class;
struct sched_entity se; struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_PREEMPT_NOTIFIERS #ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */ /* list of struct preempt_notifier: */
...@@ -972,7 +977,6 @@ struct task_struct { ...@@ -972,7 +977,6 @@ struct task_struct {
unsigned int policy; unsigned int policy;
cpumask_t cpus_allowed; cpumask_t cpus_allowed;
int nr_cpus_allowed; int nr_cpus_allowed;
unsigned int time_slice;
#ifdef CONFIG_PREEMPT_RCU #ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting; int rcu_read_lock_nesting;
......
...@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_struct *p) ...@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_struct *p)
p->se.wait_max = 0; p->se.wait_max = 0;
#endif #endif
INIT_LIST_HEAD(&p->run_list); INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0; p->se.on_rq = 0;
#ifdef CONFIG_PREEMPT_NOTIFIERS #ifdef CONFIG_PREEMPT_NOTIFIERS
......
...@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) ...@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{ {
struct rt_prio_array *array = &rq->rt.active; struct rt_prio_array *array = &rq->rt.active;
list_add_tail(&p->run_list, array->queue + p->prio); list_add_tail(&p->rt.run_list, array->queue + p->prio);
__set_bit(p->prio, array->bitmap); __set_bit(p->prio, array->bitmap);
inc_cpu_load(rq, p->se.load.weight); inc_cpu_load(rq, p->se.load.weight);
...@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) ...@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
update_curr_rt(rq); update_curr_rt(rq);
list_del(&p->run_list); list_del(&p->rt.run_list);
if (list_empty(array->queue + p->prio)) if (list_empty(array->queue + p->prio))
__clear_bit(p->prio, array->bitmap); __clear_bit(p->prio, array->bitmap);
dec_cpu_load(rq, p->se.load.weight); dec_cpu_load(rq, p->se.load.weight);
...@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p) ...@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{ {
struct rt_prio_array *array = &rq->rt.active; struct rt_prio_array *array = &rq->rt.active;
list_move_tail(&p->run_list, array->queue + p->prio); list_move_tail(&p->rt.run_list, array->queue + p->prio);
} }
static void static void
...@@ -212,7 +212,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) ...@@ -212,7 +212,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
return NULL; return NULL;
queue = array->queue + idx; queue = array->queue + idx;
next = list_entry(queue->next, struct task_struct, run_list); next = list_entry(queue->next, struct task_struct, rt.run_list);
next->se.exec_start = rq->clock; next->se.exec_start = rq->clock;
...@@ -261,14 +261,14 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) ...@@ -261,14 +261,14 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
queue = array->queue + idx; queue = array->queue + idx;
BUG_ON(list_empty(queue)); BUG_ON(list_empty(queue));
next = list_entry(queue->next, struct task_struct, run_list); next = list_entry(queue->next, struct task_struct, rt.run_list);
if (unlikely(pick_rt_task(rq, next, cpu))) if (unlikely(pick_rt_task(rq, next, cpu)))
goto out; goto out;
if (queue->next->next != queue) { if (queue->next->next != queue) {
/* same prio task */ /* same prio task */
next = list_entry(queue->next->next, struct task_struct, next = list_entry(queue->next->next, struct task_struct,
run_list); rt.run_list);
if (pick_rt_task(rq, next, cpu)) if (pick_rt_task(rq, next, cpu))
goto out; goto out;
} }
...@@ -282,7 +282,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) ...@@ -282,7 +282,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
queue = array->queue + idx; queue = array->queue + idx;
BUG_ON(list_empty(queue)); BUG_ON(list_empty(queue));
list_for_each_entry(next, queue, run_list) { list_for_each_entry(next, queue, rt.run_list) {
if (pick_rt_task(rq, next, cpu)) if (pick_rt_task(rq, next, cpu))
goto out; goto out;
} }
...@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p) ...@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
if (p->policy != SCHED_RR) if (p->policy != SCHED_RR)
return; return;
if (--p->time_slice) if (--p->rt.time_slice)
return; return;
p->time_slice = DEF_TIMESLICE; p->rt.time_slice = DEF_TIMESLICE;
/* /*
* Requeue to the end of queue if we are not the only element * Requeue to the end of queue if we are not the only element
* on the queue: * on the queue:
*/ */
if (p->run_list.prev != p->run_list.next) { if (p->rt.run_list.prev != p->rt.run_list.next) {
requeue_task_rt(rq, p); requeue_task_rt(rq, p);
set_tsk_need_resched(p); set_tsk_need_resched(p);
} }
......
...@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose) ...@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
* all the memory it needs. That way it should be able to * all the memory it needs. That way it should be able to
* exit() and clear out its resources quickly... * exit() and clear out its resources quickly...
*/ */
p->time_slice = HZ; p->rt.time_slice = HZ;
set_tsk_thread_flag(p, TIF_MEMDIE); set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p); force_sig(SIGKILL, p);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment