Commit d8206bb3 authored by Tommaso Cucinotta, committed by Ingo Molnar

sched/deadline: Split cpudl_set() into cpudl_set() and cpudl_clear()

These two functions exercise independent code paths and need different arguments.

After this change, you call:

  cpudl_clear(cp, cpu);
  cpudl_set(cp, cpu, dl);

instead of:

  cpudl_set(cp, cpu, 0 /* dl */, 0 /* is_valid */);
  cpudl_set(cp, cpu, dl, 1 /* is_valid */);
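
The caller-side pattern this split enables is visible in dec_dl_deadline() in the diff below: clear the CPU's heap entry when its last -deadline task goes away, otherwise push the new earliest deadline. As a rough, userspace-only sketch of that decision (struct cpudl and both function bodies here are stand-in stubs, not the real kernel/sched/cpudeadline.c code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-in for struct cpudl; the real one lives in kernel/sched/cpudeadline.h. */
struct cpudl { int dummy; };

/* Stub: the real cpudl_clear() removes the CPU's entry from the max-heap. */
static void cpudl_clear(struct cpudl *cp, int cpu)
{
	printf("cpu %d removed from the cpudl max-heap\n", cpu);
}

/* Stub: the real cpudl_set() inserts or updates the CPU's earliest deadline. */
static void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	printf("cpu %d now advertises earliest deadline %llu\n",
	       cpu, (unsigned long long)dl);
}

/* Caller-side pattern after the split (cf. dec_dl_deadline() in the diff). */
static void update_cpu_deadline(struct cpudl *cp, int cpu,
				int nr_dl_running, u64 earliest_dl)
{
	if (!nr_dl_running)
		cpudl_clear(cp, cpu);		/* no -dl tasks left on this CPU */
	else
		cpudl_set(cp, cpu, earliest_dl);
}

int main(void)
{
	struct cpudl cp = { 0 };

	update_cpu_deadline(&cp, 1, 2, 1000000);	/* CPU still has -dl tasks */
	update_cpu_deadline(&cp, 1, 0, 0);		/* last -dl task went away */
	return 0;
}

With the split, removal sites no longer have to pass a dead dl value and an is_valid flag; each call names the operation it performs.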
Signed-off-by: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Luca Abeni <luca.abeni@unitn.it>
Reviewed-by: Juri Lelli <juri.lelli@arm.com>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-dl@retis.sssup.it
Link: http://lkml.kernel.org/r/1471184828-12644-4-git-send-email-tommaso.cucinotta@sssup.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -145,16 +145,15 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 }
 
 /*
- * cpudl_set - update the cpudl max-heap
+ * cpudl_clear - remove a cpu from the cpudl max-heap
  * @cp: the cpudl max-heap context
  * @cpu: the target cpu
- * @dl: the new earliest deadline for this cpu
  *
  * Notes: assumes cpu_rq(cpu)->lock is locked
  *
  * Returns: (void)
  */
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
+void cpudl_clear(struct cpudl *cp, int cpu)
 {
 	int old_idx, new_cpu;
 	unsigned long flags;
@@ -162,17 +161,15 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
+
 	old_idx = cp->elements[cpu].idx;
-	if (!is_valid) {
-		/* remove item */
-		if (old_idx == IDX_INVALID) {
-			/*
-			 * Nothing to remove if old_idx was invalid.
-			 * This could happen if a rq_offline_dl is
-			 * called for a CPU without -dl tasks running.
-			 */
-			goto out;
-		}
+	if (old_idx == IDX_INVALID) {
+		/*
+		 * Nothing to remove if old_idx was invalid.
+		 * This could happen if a rq_offline_dl is
+		 * called for a CPU without -dl tasks running.
+		 */
+	} else {
 		new_cpu = cp->elements[cp->size - 1].cpu;
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
@@ -180,11 +177,32 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[new_cpu].idx = old_idx;
 		cp->elements[cpu].idx = IDX_INVALID;
 		cpudl_heapify(cp, old_idx);
-		cpumask_set_cpu(cpu, cp->free_cpus);
 
-		goto out;
+		cpumask_set_cpu(cpu, cp->free_cpus);
 	}
+	raw_spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/*
+ * cpudl_set - update the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ * @dl: the new earliest deadline for this cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
+{
+	int old_idx;
+	unsigned long flags;
+
+	WARN_ON(!cpu_present(cpu));
 
+	raw_spin_lock_irqsave(&cp->lock, flags);
+
+	old_idx = cp->elements[cpu].idx;
 	if (old_idx == IDX_INVALID) {
 		int new_idx = cp->size++;
 		cp->elements[new_idx].dl = dl;
@@ -197,7 +215,6 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cpudl_heapify(cp, old_idx);
 	}
 
-out:
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -23,7 +23,8 @@ struct cpudl {
 #ifdef CONFIG_SMP
 int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	       struct cpumask *later_mask);
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
+void cpudl_clear(struct cpudl *cp, int cpu);
 int cpudl_init(struct cpudl *cp);
 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -798,7 +798,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 		dl_rq->earliest_dl.curr = deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 	}
 }
 
@@ -813,14 +813,14 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (!dl_rq->dl_nr_running) {
 		dl_rq->earliest_dl.curr = 0;
 		dl_rq->earliest_dl.next = 0;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	} else {
 		struct rb_node *leftmost = dl_rq->rb_leftmost;
 		struct sched_dl_entity *entry;
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 	}
 }
 
@@ -1671,7 +1671,7 @@ static void rq_online_dl(struct rq *rq)
 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 
 	if (rq->dl.dl_nr_running > 0)
-		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1680,7 +1680,7 @@ static void rq_offline_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
 
-	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }