Commit 8a8c69c3 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Add rq->lock wrappers

The check for a missing update_rq_clock() call can cope with partial
rq->lock wrappery, since a missing wrapper can only cause the warning
not to be emitted when it should have been; it cannot cause the warning
to trigger when it should not have.

The check for a duplicate update_rq_clock() call, however, can trigger
false warnings. Therefore add more comprehensive rq->lock wrappery.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 26ae58d2
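
For illustration only (not part of the commit): a minimal sketch of the call-site conversion the patch applies throughout the scheduler. The wrapper names (rq_lock_irqsave(), rq_unlock_irqrestore()) and struct rq_flags are the ones added in the kernel/sched/sched.h hunk below; the surrounding example function is hypothetical and only shows the before/after pattern.

	/* Hypothetical caller, sketching the pattern used in the fair.c hunks below. */
	static void example_poke_rq(struct rq *rq)
	{
		struct rq_flags rf;		/* replaces the old 'unsigned long flags' local */

		/* was: raw_spin_lock_irqsave(&rq->lock, flags); */
		rq_lock_irqsave(rq, &rf);	/* takes rq->lock, then pins it via rq_pin_lock() */

		update_rq_clock(rq);		/* clock update now happens under the pinned lock */

		/* was: raw_spin_unlock_irqrestore(&rq->lock, flags); */
		rq_unlock_irqrestore(rq, &rf);	/* unpins, then drops rq->lock and restores IRQ flags */
	}

Routing every rq->lock critical section through these paired wrappers is what gives the update_rq_clock() debug checks a complete view of lock/unlock, which is the rationale stated in the message above.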
[The diff for one further file in this commit is collapsed and not shown.]
kernel/sched/fair.c:

@@ -4271,8 +4271,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
 				throttled_list) {
 		struct rq *rq = rq_of(cfs_rq);
+		struct rq_flags rf;
 
-		raw_spin_lock(&rq->lock);
+		rq_lock(rq, &rf);
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
@@ -4289,7 +4290,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
 		unthrottle_cfs_rq(cfs_rq);
 
 next:
-		raw_spin_unlock(&rq->lock);
+		rq_unlock(rq, &rf);
 
 		if (!remaining)
 			break;
@@ -5097,15 +5098,16 @@ void cpu_load_update_nohz_stop(void)
 	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	struct rq *this_rq = this_rq();
 	unsigned long load;
+	struct rq_flags rf;
 
 	if (curr_jiffies == this_rq->last_load_update_tick)
 		return;
 
 	load = weighted_cpuload(cpu_of(this_rq));
-	raw_spin_lock(&this_rq->lock);
+	rq_lock(this_rq, &rf);
 	update_rq_clock(this_rq);
 	cpu_load_update_nohz(this_rq, curr_jiffies, load);
-	raw_spin_unlock(&this_rq->lock);
+	rq_unlock(this_rq, &rf);
 }
 #else /* !CONFIG_NO_HZ_COMMON */
 static inline void cpu_load_update_nohz(struct rq *this_rq,
@@ -6913,9 +6915,11 @@ static void attach_task(struct rq *rq, struct task_struct *p)
  */
 static void attach_one_task(struct rq *rq, struct task_struct *p)
 {
-	raw_spin_lock(&rq->lock);
+	struct rq_flags rf;
+
+	rq_lock(rq, &rf);
 	attach_task(rq, p);
-	raw_spin_unlock(&rq->lock);
+	rq_unlock(rq, &rf);
 }
 
 /*
@@ -6926,8 +6930,9 @@ static void attach_tasks(struct lb_env *env)
 {
 	struct list_head *tasks = &env->tasks;
 	struct task_struct *p;
+	struct rq_flags rf;
 
-	raw_spin_lock(&env->dst_rq->lock);
+	rq_lock(env->dst_rq, &rf);
 
 	while (!list_empty(tasks)) {
 		p = list_first_entry(tasks, struct task_struct, se.group_node);
@@ -6936,7 +6941,7 @@ static void attach_tasks(struct lb_env *env)
 		attach_task(env->dst_rq, p);
 	}
 
-	raw_spin_unlock(&env->dst_rq->lock);
+	rq_unlock(env->dst_rq, &rf);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6944,9 +6949,9 @@ static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq;
-	unsigned long flags;
+	struct rq_flags rf;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
 	/*
@@ -6965,7 +6970,7 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq->tg->se[cpu])
 			update_load_avg(cfs_rq->tg->se[cpu], 0);
 	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	rq_unlock_irqrestore(rq, &rf);
 }
 
 /*
@@ -7019,12 +7024,12 @@ static inline void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq = &rq->cfs;
-	unsigned long flags;
+	struct rq_flags rf;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	rq_unlock_irqrestore(rq, &rf);
 }
 
 static unsigned long task_h_load(struct task_struct *p)
@@ -8042,7 +8047,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_domain *sd_parent = sd->parent;
 	struct sched_group *group;
 	struct rq *busiest;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
 	struct lb_env env = {
@@ -8105,7 +8110,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
-		raw_spin_lock_irqsave(&busiest->lock, flags);
+		rq_lock_irqsave(busiest, &rf);
 		update_rq_clock(busiest);
 
 		/*
@@ -8122,14 +8127,14 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * See task_rq_lock() family for the details.
 		 */
-		raw_spin_unlock(&busiest->lock);
+		rq_unlock(busiest, &rf);
 
 		if (cur_ld_moved) {
 			attach_tasks(&env);
 			ld_moved += cur_ld_moved;
 		}
 
-		local_irq_restore(flags);
+		local_irq_restore(rf.flags);
 
 		if (env.flags & LBF_NEED_BREAK) {
 			env.flags &= ~LBF_NEED_BREAK;
@@ -8207,6 +8212,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		sd->nr_balance_failed++;
 
 		if (need_active_balance(&env)) {
+			unsigned long flags;
+
 			raw_spin_lock_irqsave(&busiest->lock, flags);
 
 			/* don't kick the active_load_balance_cpu_stop,
@@ -8444,8 +8451,9 @@ static int active_load_balance_cpu_stop(void *data)
 	struct rq *target_rq = cpu_rq(target_cpu);
 	struct sched_domain *sd;
 	struct task_struct *p = NULL;
+	struct rq_flags rf;
 
-	raw_spin_lock_irq(&busiest_rq->lock);
+	rq_lock_irq(busiest_rq, &rf);
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
@@ -8496,7 +8504,7 @@ static int active_load_balance_cpu_stop(void *data)
 	rcu_read_unlock();
out_unlock:
 	busiest_rq->active_balance = 0;
-	raw_spin_unlock(&busiest_rq->lock);
+	rq_unlock(busiest_rq, &rf);
 
 	if (p)
 		attach_one_task(target_rq, p);
@@ -8794,10 +8802,13 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 		 * do the balance.
 		 */
 		if (time_after_eq(jiffies, rq->next_balance)) {
-			raw_spin_lock_irq(&rq->lock);
+			struct rq_flags rf;
+
+			rq_lock_irq(rq, &rf);
 			update_rq_clock(rq);
 			cpu_load_update_idle(rq);
-			raw_spin_unlock_irq(&rq->lock);
+			rq_unlock_irq(rq, &rf);
+
 			rebalance_domains(rq, CPU_IDLE);
 		}
@@ -8988,8 +8999,9 @@ static void task_fork_fair(struct task_struct *p)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se, *curr;
 	struct rq *rq = this_rq();
+	struct rq_flags rf;
 
-	raw_spin_lock(&rq->lock);
+	rq_lock(rq, &rf);
 	update_rq_clock(rq);
 
 	cfs_rq = task_cfs_rq(current);
@@ -9010,7 +9022,7 @@ static void task_fork_fair(struct task_struct *p)
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
-	raw_spin_unlock(&rq->lock);
+	rq_unlock(rq, &rf);
 }
 
 /*
@@ -9372,7 +9384,6 @@ static DEFINE_MUTEX(shares_mutex);
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
-	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -9389,19 +9400,17 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
-		struct sched_entity *se;
+		struct sched_entity *se = tg->se[i];
+		struct rq_flags rf;
 
-		se = tg->se[i];
 		/* Propagate contribution to hierarchy */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-
-		/* Possible calls to update_curr() need rq clock */
+		rq_lock_irqsave(rq, &rf);
 		update_rq_clock(rq);
 		for_each_sched_entity(se) {
 			update_load_avg(se, UPDATE_TG);
 			update_cfs_shares(se);
 		}
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		rq_unlock_irqrestore(rq, &rf);
 	}
 
 done:
kernel/sched/sched.h:

@@ -1624,6 +1624,7 @@ static inline void sched_avg_update(struct rq *rq) { }
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(rq->lock);
 
 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(p->pi_lock)
 	__acquires(rq->lock);
@@ -1645,6 +1646,62 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT