Commit 0e34600a authored by Peter Zijlstra, committed by Ingo Molnar

sched: Misc cleanups

Random remaining guard use...
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6fb45460
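
For readers unfamiliar with the pattern being applied below: these conversions use the scope-based cleanup helpers from <linux/cleanup.h>. guard(class)(args) acquires the resource (a lock, an RCU read-side section, a preemption-disabled region, ...) and releases it automatically when the enclosing scope is left, on every return path; scoped_guard (class, args) { ... } confines the critical section to an explicit block. A minimal usage sketch follows; example_lock, example_flag and example_guarded() are made up for illustration and are not part of this commit.

/* Illustrative sketch only -- not code from this commit. */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(example_lock);
static bool example_flag;

static int example_guarded(void)
{
        /* mutex_unlock(&example_lock) runs automatically on every return path. */
        guard(mutex)(&example_lock);

        /* rcu_read_unlock() runs when the braces are left, including via return. */
        scoped_guard (rcu) {
                if (READ_ONCE(example_flag))
                        return -EBUSY;  /* both guards are released here */
        }

        return 0;
}
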
@@ -1480,16 +1480,12 @@ static void __uclamp_update_util_min_rt_default(struct task_struct *p)
 static void uclamp_update_util_min_rt_default(struct task_struct *p)
 {
-        struct rq_flags rf;
-        struct rq *rq;
         if (!rt_task(p))
                 return;
         /* Protect updates to p->uclamp_* */
-        rq = task_rq_lock(p, &rf);
+        guard(task_rq_lock)(p);
         __uclamp_update_util_min_rt_default(p);
-        task_rq_unlock(rq, p, &rf);
 }
 static inline struct uclamp_se
@@ -1785,9 +1781,8 @@ static void uclamp_update_root_tg(void)
         uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
                       sysctl_sched_uclamp_util_max, false);
-        rcu_read_lock();
+        guard(rcu)();
         cpu_util_update_eff(&root_task_group.css);
-        rcu_read_unlock();
 }
 #else
 static void uclamp_update_root_tg(void) { }
@@ -1814,10 +1809,9 @@ static void uclamp_sync_util_min_rt_default(void)
         smp_mb__after_spinlock();
         read_unlock(&tasklist_lock);
-        rcu_read_lock();
+        guard(rcu)();
         for_each_process_thread(g, p)
                 uclamp_update_util_min_rt_default(p);
-        rcu_read_unlock();
 }
 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
@@ -2250,20 +2244,13 @@ static __always_inline
 int task_state_match(struct task_struct *p, unsigned int state)
 {
 #ifdef CONFIG_PREEMPT_RT
-        int match;
         /*
          * Serialize against current_save_and_set_rtlock_wait_state() and
          * current_restore_rtlock_saved_state().
          */
-        raw_spin_lock_irq(&p->pi_lock);
-        match = __task_state_match(p, state);
-        raw_spin_unlock_irq(&p->pi_lock);
-        return match;
-#else
-        return __task_state_match(p, state);
+        guard(raw_spinlock_irq)(&p->pi_lock);
 #endif
+        return __task_state_match(p, state);
 }
 /*
@@ -2417,10 +2404,9 @@ void migrate_disable(void)
                 return;
         }
-        preempt_disable();
+        guard(preempt)();
         this_rq()->nr_pinned++;
         p->migration_disabled = 1;
-        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2444,7 +2430,7 @@ void migrate_enable(void)
          * Ensure stop_task runs either before or after this, and that
          * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
          */
-        preempt_disable();
+        guard(preempt)();
         if (p->cpus_ptr != &p->cpus_mask)
                 __set_cpus_allowed_ptr(p, &ac);
         /*
@@ -2455,7 +2441,6 @@ void migrate_enable(void)
         barrier();
         p->migration_disabled = 0;
         this_rq()->nr_pinned--;
-        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_enable);
@@ -3516,13 +3501,11 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
  */
 void kick_process(struct task_struct *p)
 {
-        int cpu;
-        preempt_disable();
-        cpu = task_cpu(p);
+        guard(preempt)();
+        int cpu = task_cpu(p);
         if ((cpu != smp_processor_id()) && task_curr(p))
                 smp_send_reschedule(cpu);
-        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
@@ -6368,8 +6351,9 @@ static void sched_core_balance(struct rq *rq)
         struct sched_domain *sd;
         int cpu = cpu_of(rq);
-        preempt_disable();
-        rcu_read_lock();
+        guard(preempt)();
+        guard(rcu)();
         raw_spin_rq_unlock_irq(rq);
         for_each_domain(cpu, sd) {
                 if (need_resched())
@@ -6379,8 +6363,6 @@ static void sched_core_balance(struct rq *rq)
                         break;
         }
         raw_spin_rq_lock_irq(rq);
-        rcu_read_unlock();
-        preempt_enable();
 }
 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
@@ -8258,8 +8240,6 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 #ifdef CONFIG_SMP
 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
 {
-        int ret = 0;
         /*
          * If the task isn't a deadline task or admission control is
          * disabled then we don't care about affinity changes.
@@ -8273,11 +8253,11 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
          * tasks allowed to run on all the CPUs in the task's
          * root_domain.
          */
-        rcu_read_lock();
+        guard(rcu)();
         if (!cpumask_subset(task_rq(p)->rd->span, mask))
-                ret = -EBUSY;
-        rcu_read_unlock();
-        return ret;
+                return -EBUSY;
+        return 0;
 }
 #endif
@@ -10509,11 +10489,9 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 #ifdef CONFIG_UCLAMP_TASK_GROUP
         /* Propagate the effective uclamp value for the new group */
-        mutex_lock(&uclamp_mutex);
-        rcu_read_lock();
+        guard(mutex)(&uclamp_mutex);
+        guard(rcu)();
         cpu_util_update_eff(css);
-        rcu_read_unlock();
-        mutex_unlock(&uclamp_mutex);
 #endif
         return 0;
@@ -10664,8 +10642,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
         static_branch_enable(&sched_uclamp_used);
-        mutex_lock(&uclamp_mutex);
-        rcu_read_lock();
+        guard(mutex)(&uclamp_mutex);
+        guard(rcu)();
         tg = css_tg(of_css(of));
         if (tg->uclamp_req[clamp_id].value != req.util)
@@ -10680,9 +10658,6 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
         /* Update effective clamps to track the most restrictive value */
         cpu_util_update_eff(of_css(of));
-        rcu_read_unlock();
-        mutex_unlock(&uclamp_mutex);
         return nbytes;
 }
@@ -10708,10 +10683,10 @@ static inline void cpu_uclamp_print(struct seq_file *sf,
         u64 percent;
         u32 rem;
-        rcu_read_lock();
-        tg = css_tg(seq_css(sf));
-        util_clamp = tg->uclamp_req[clamp_id].value;
-        rcu_read_unlock();
+        scoped_guard (rcu) {
+                tg = css_tg(seq_css(sf));
+                util_clamp = tg->uclamp_req[clamp_id].value;
+        }
         if (util_clamp == SCHED_CAPACITY_SCALE) {
                 seq_puts(sf, "max\n");
@@ -11033,7 +11008,6 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
-        int ret;
         struct cfs_schedulable_data data = {
                 .tg = tg,
                 .period = period,
@@ -11045,11 +11019,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
                 do_div(data.quota, NSEC_PER_USEC);
         }
-        rcu_read_lock();
-        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
-        rcu_read_unlock();
-        return ret;
+        guard(rcu)();
+        return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
 }
 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
@@ -11654,14 +11625,12 @@ int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
          * are not the last task to be migrated from this cpu for this mm, so
          * there is no need to move src_cid to the destination cpu.
          */
-        rcu_read_lock();
+        guard(rcu)();
         src_task = rcu_dereference(src_rq->curr);
         if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-                rcu_read_unlock();
                 t->last_mm_cid = -1;
                 return -1;
         }
-        rcu_read_unlock();
         return src_cid;
 }
@@ -11705,18 +11674,17 @@ int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
          * the lazy-put flag, this task will be responsible for transitioning
          * from lazy-put flag set to MM_CID_UNSET.
          */
-        rcu_read_lock();
-        src_task = rcu_dereference(src_rq->curr);
-        if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-                rcu_read_unlock();
-                /*
-                 * We observed an active task for this mm, there is therefore
-                 * no point in moving this cid to the destination cpu.
-                 */
-                t->last_mm_cid = -1;
-                return -1;
-        }
-        rcu_read_unlock();
+        scoped_guard (rcu) {
+                src_task = rcu_dereference(src_rq->curr);
+                if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
+                        /*
+                         * We observed an active task for this mm, there is therefore
+                         * no point in moving this cid to the destination cpu.
+                         */
+                        t->last_mm_cid = -1;
+                        return -1;
+                }
+        }
         /*
          * The src_cid is unused, so it can be unset.
@@ -11789,7 +11757,6 @@ static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_
 {
         struct rq *rq = cpu_rq(cpu);
         struct task_struct *t;
-        unsigned long flags;
         int cid, lazy_cid;
         cid = READ_ONCE(pcpu_cid->cid);
@@ -11824,23 +11791,21 @@ static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_
          * the lazy-put flag, that task will be responsible for transitioning
          * from lazy-put flag set to MM_CID_UNSET.
          */
-        rcu_read_lock();
-        t = rcu_dereference(rq->curr);
-        if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
-                rcu_read_unlock();
-                return;
+        scoped_guard (rcu) {
+                t = rcu_dereference(rq->curr);
+                if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
+                        return;
         }
-        rcu_read_unlock();
         /*
          * The cid is unused, so it can be unset.
          * Disable interrupts to keep the window of cid ownership without rq
          * lock small.
          */
-        local_irq_save(flags);
-        if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
-                __mm_cid_put(mm, cid);
-        local_irq_restore(flags);
+        scoped_guard (irqsave) {
+                if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
+                        __mm_cid_put(mm, cid);
+        }
 }
 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
@@ -11862,14 +11827,13 @@ static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
          * snapshot associated with this cid if an active task using the mm is
          * observed on this rq.
          */
-        rcu_read_lock();
-        curr = rcu_dereference(rq->curr);
-        if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
-                WRITE_ONCE(pcpu_cid->time, rq_clock);
-                rcu_read_unlock();
-                return;
+        scoped_guard (rcu) {
+                curr = rcu_dereference(rq->curr);
+                if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
+                        WRITE_ONCE(pcpu_cid->time, rq_clock);
+                        return;
+                }
         }
-        rcu_read_unlock();
         if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
                 return;
@@ -11963,7 +11927,6 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 void sched_mm_cid_exit_signals(struct task_struct *t)
 {
         struct mm_struct *mm = t->mm;
-        struct rq_flags rf;
         struct rq *rq;
         if (!mm)
@@ -11971,7 +11934,7 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
         preempt_disable();
         rq = this_rq();
-        rq_lock_irqsave(rq, &rf);
+        guard(rq_lock_irqsave)(rq);
         preempt_enable_no_resched();    /* holding spinlock */
         WRITE_ONCE(t->mm_cid_active, 0);
         /*
@@ -11981,13 +11944,11 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
         smp_mb();
         mm_cid_put(mm);
         t->last_mm_cid = t->mm_cid = -1;
-        rq_unlock_irqrestore(rq, &rf);
 }
 void sched_mm_cid_before_execve(struct task_struct *t)
 {
         struct mm_struct *mm = t->mm;
-        struct rq_flags rf;
         struct rq *rq;
         if (!mm)
@@ -11995,7 +11956,7 @@ void sched_mm_cid_before_execve(struct task_struct *t)
         preempt_disable();
         rq = this_rq();
-        rq_lock_irqsave(rq, &rf);
+        guard(rq_lock_irqsave)(rq);
         preempt_enable_no_resched();    /* holding spinlock */
         WRITE_ONCE(t->mm_cid_active, 0);
         /*
@@ -12005,13 +11966,11 @@ void sched_mm_cid_before_execve(struct task_struct *t)
         smp_mb();
         mm_cid_put(mm);
         t->last_mm_cid = t->mm_cid = -1;
-        rq_unlock_irqrestore(rq, &rf);
 }
 void sched_mm_cid_after_execve(struct task_struct *t)
 {
         struct mm_struct *mm = t->mm;
-        struct rq_flags rf;
         struct rq *rq;
         if (!mm)
@@ -12019,16 +11978,16 @@ void sched_mm_cid_after_execve(struct task_struct *t)
         preempt_disable();
         rq = this_rq();
-        rq_lock_irqsave(rq, &rf);
-        preempt_enable_no_resched();    /* holding spinlock */
-        WRITE_ONCE(t->mm_cid_active, 1);
-        /*
-         * Store t->mm_cid_active before loading per-mm/cpu cid.
-         * Matches barrier in sched_mm_cid_remote_clear_old().
-         */
-        smp_mb();
-        t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
-        rq_unlock_irqrestore(rq, &rf);
+        scoped_guard (rq_lock_irqsave, rq) {
+                preempt_enable_no_resched();    /* holding spinlock */
+                WRITE_ONCE(t->mm_cid_active, 1);
+                /*
+                 * Store t->mm_cid_active before loading per-mm/cpu cid.
+                 * Matches barrier in sched_mm_cid_remote_clear_old().
+                 */
+                smp_mb();
+                t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
+        }
         rseq_set_notify_resume(t);
 }
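
As a rough mental model of how these guards work: the guard classes are built on the compiler's cleanup attribute, so releasing the resource is tied to a local variable going out of scope. The following is a simplified sketch of that mechanism, not the actual definitions in <linux/cleanup.h>; example_preempt_guard(), example_preempt_enter(), example_preempt_exit() and example_user() are invented names for illustration.

/* Simplified sketch of the mechanism; the real macros in <linux/cleanup.h> are more general. */
#include <linux/preempt.h>

static inline int example_preempt_enter(void)
{
        preempt_disable();
        return 0;
}

static inline void example_preempt_exit(int *unused)
{
        preempt_enable();
}

#define example_preempt_guard() \
        int __example_guard __attribute__((cleanup(example_preempt_exit))) = example_preempt_enter()

static void example_user(void)
{
        example_preempt_guard();        /* preemption re-enabled automatically when this scope ends */
        /* ... preemption-disabled work ... */
}
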