Commit 62160e3f authored by Ingo Molnar

sched: track cfs_rq->curr on !group-scheduling too

Noticed by Roman Zippel: use cfs_rq->curr in the !group-scheduling
case too. Small micro-optimization and cleanup effect:

   text    data     bss     dec     hex filename
   36269    3482      24   39775    9b5f sched.o.before
   36177    3486      24   39687    9b07 sched.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 53df556e
@@ -189,11 +189,11 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 	struct rb_node *rb_load_balance_curr;
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/* 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
......
@@ -111,51 +111,38 @@ extern struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->rq;
-}
 
 /* currently running entity (if any) on this cfs_rq */
 static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->curr;
 }
 
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se)	(!se->my_q)
-
 static inline void
 set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	cfs_rq->curr = se;
 }
 
-#else	/* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
+/* cpu runqueue to which this cfs_rq is attached */
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
-	return container_of(cfs_rq, struct rq, cfs);
+	return cfs_rq->rq;
 }
 
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
-	struct rq *rq = rq_of(cfs_rq);
-
-	if (unlikely(rq->curr->sched_class != &fair_sched_class))
-		return NULL;
-
-	return &rq->curr->se;
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)	(!se->my_q)
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return container_of(cfs_rq, struct rq, cfs);
 }
 
 #define entity_is_task(se)	1
 
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct task_struct *task_of(struct sched_entity *se)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment