Commit 8ecca394 authored by Peter Zijlstra, committed by Ingo Molnar

rbtree, sched/deadline: Use rb_add_cached()

Reduce rbtree boilerplate by using the new helpers.

Make rb_add_cached() / rb_erase_cached() return a pointer to the
leftmost node to aid in updating additional state.
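
For illustration, a minimal sketch of the insertion-side pattern this
enables; struct foo, foo_less() and the cached 'earliest' value are
hypothetical, not part of this commit:

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct foo {
		u64 key;
		struct rb_node node;
	};

	#define __node_2_foo(n)	rb_entry((n), struct foo, node)

	static bool foo_less(struct rb_node *a, const struct rb_node *b)
	{
		return __node_2_foo(a)->key < __node_2_foo(b)->key;
	}

	static void foo_enqueue(struct rb_root_cached *root, struct foo *f,
				u64 *earliest)
	{
		/* rb_add_cached() returns @node iff it became the new leftmost */
		if (rb_add_cached(&f->node, root, foo_less))
			*earliest = f->key;
	}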
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
parent bf9be9a1
include/linux/rbtree.h

@@ -141,12 +141,18 @@ static inline void rb_insert_color_cached(struct rb_node *node,
 	rb_insert_color(node, &root->rb_root);
 }
 
-static inline void rb_erase_cached(struct rb_node *node,
-				   struct rb_root_cached *root)
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
 {
+	struct rb_node *leftmost = NULL;
+
 	if (root->rb_leftmost == node)
-		root->rb_leftmost = rb_next(node);
+		leftmost = root->rb_leftmost = rb_next(node);
+
 	rb_erase(node, &root->rb_root);
+
+	return leftmost;
 }
 
 static inline void rb_replace_node_cached(struct rb_node *victim,
@@ -179,8 +185,10 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
  * @node: node to insert
  * @tree: leftmost cached tree to insert @node into
  * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
  */
-static __always_inline void
+static __always_inline struct rb_node *
 rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
 	      bool (*less)(struct rb_node *, const struct rb_node *))
 {
@@ -200,6 +208,8 @@ rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
 	rb_link_node(node, parent, link);
 	rb_insert_color_cached(node, tree, leftmost);
+
+	return leftmost ? node : NULL;
 }
 
 /**
...
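
For the erase side, a matching sketch (same hypothetical struct foo as
above): the pointer returned by rb_erase_cached() is the node that became
the new leftmost, or NULL when the leftmost did not change or the tree is
now empty, so cached state is refreshed only when necessary:

	static void foo_dequeue(struct rb_root_cached *root, struct foo *f,
				u64 *earliest)
	{
		struct rb_node *leftmost;

		/* non-NULL only if @f was the leftmost and a successor exists */
		leftmost = rb_erase_cached(&f->node, root);
		if (leftmost)
			*earliest = __node_2_foo(leftmost)->key;
	}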
kernel/sched/deadline.c

@@ -517,58 +517,44 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	update_dl_migration(dl_rq);
 }
 
+#define __node_2_pdl(node) \
+	rb_entry((node), struct task_struct, pushable_dl_tasks)
+
+static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
+{
+	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
+}
+
 /*
  * The list of pushable -deadline task is not a plist, like in
  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
  */
 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 {
-	struct dl_rq *dl_rq = &rq->dl;
-	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct task_struct *entry;
-	bool leftmost = true;
+	struct rb_node *leftmost;
 
 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 
-	while (*link) {
-		parent = *link;
-		entry = rb_entry(parent, struct task_struct,
-				 pushable_dl_tasks);
-		if (dl_entity_preempt(&p->dl, &entry->dl))
-			link = &parent->rb_left;
-		else {
-			link = &parent->rb_right;
-			leftmost = false;
-		}
-	}
-
+	leftmost = rb_add_cached(&p->pushable_dl_tasks,
+				 &rq->dl.pushable_dl_tasks_root,
+				 __pushable_less);
 	if (leftmost)
-		dl_rq->earliest_dl.next = p->dl.deadline;
-
-	rb_link_node(&p->pushable_dl_tasks, parent, link);
-	rb_insert_color_cached(&p->pushable_dl_tasks,
-			       &dl_rq->pushable_dl_tasks_root, leftmost);
+		rq->dl.earliest_dl.next = p->dl.deadline;
 }
 
 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 {
 	struct dl_rq *dl_rq = &rq->dl;
+	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
+	struct rb_node *leftmost;
 
 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 		return;
 
-	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
-		struct rb_node *next_node;
-
-		next_node = rb_next(&p->pushable_dl_tasks);
-		if (next_node) {
-			dl_rq->earliest_dl.next = rb_entry(next_node,
-				struct task_struct, pushable_dl_tasks)->dl.deadline;
-		}
-	}
-
-	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
+	if (leftmost)
+		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
+
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 }
@@ -1478,29 +1464,21 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	dec_dl_migration(dl_se, dl_rq);
 }
 
+#define __node_2_dle(node) \
+	rb_entry((node), struct sched_dl_entity, rb_node)
+
+static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
+{
+	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
+}
+
 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 {
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
-	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct sched_dl_entity *entry;
-	int leftmost = 1;
 
 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
 
-	while (*link) {
-		parent = *link;
-		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
-		if (dl_time_before(dl_se->deadline, entry->deadline))
-			link = &parent->rb_left;
-		else {
-			link = &parent->rb_right;
-			leftmost = 0;
-		}
-	}
-
-	rb_link_node(&dl_se->rb_node, parent, link);
-	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
+	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
 
 	inc_dl_tasks(dl_se, dl_rq);
 }
@@ -1513,6 +1491,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 		return;
 
 	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
+
 	RB_CLEAR_NODE(&dl_se->rb_node);
 
 	dec_dl_tasks(dl_se, dl_rq);
...