Commit 0e34600a authored by Peter Zijlstra, committed by Ingo Molnar

sched: Misc cleanups

Random remaining guard use...
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6fb45460
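
For context: guard() and scoped_guard() are the scope-based cleanup helpers from include/linux/cleanup.h. A guard takes its lock when declared and releases it automatically when the enclosing scope is left, including on early return, which is what lets the explicit unlock calls and error-path unlocks be dropped in the hunks below. A minimal sketch of the pattern, using a made-up struct counter type rather than anything from this commit:

#include <linux/cleanup.h>	/* guard(), scoped_guard() */
#include <linux/mutex.h>

/* Hypothetical example type, not from the kernel tree. */
struct counter {
	struct mutex lock;
	int value;
};

/* Before: every exit path must pair the lock with an explicit unlock. */
static int counter_read_old(struct counter *c)
{
	int val;

	mutex_lock(&c->lock);
	val = c->value;
	mutex_unlock(&c->lock);
	return val;
}

/* After: guard(mutex) releases the lock automatically at end of scope. */
static int counter_read_new(struct counter *c)
{
	guard(mutex)(&c->lock);
	return c->value;
}
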
@@ -1480,16 +1480,12 @@ static void __uclamp_update_util_min_rt_default(struct task_struct *p)
 static void uclamp_update_util_min_rt_default(struct task_struct *p)
 {
-	struct rq_flags rf;
-	struct rq *rq;
-
 	if (!rt_task(p))
 		return;
 
 	/* Protect updates to p->uclamp_* */
-	rq = task_rq_lock(p, &rf);
+	guard(task_rq_lock)(p);
 	__uclamp_update_util_min_rt_default(p);
-	task_rq_unlock(rq, p, &rf);
 }
 
 static inline struct uclamp_se
@@ -1785,9 +1781,8 @@ static void uclamp_update_root_tg(void)
 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
 		      sysctl_sched_uclamp_util_max, false);
 
-	rcu_read_lock();
+	guard(rcu)();
 	cpu_util_update_eff(&root_task_group.css);
-	rcu_read_unlock();
 }
 #else
 static void uclamp_update_root_tg(void) { }
@@ -1814,10 +1809,9 @@ static void uclamp_sync_util_min_rt_default(void)
 	smp_mb__after_spinlock();
 	read_unlock(&tasklist_lock);
 
-	rcu_read_lock();
+	guard(rcu)();
 	for_each_process_thread(g, p)
 		uclamp_update_util_min_rt_default(p);
-	rcu_read_unlock();
 }
 
 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
@@ -2250,20 +2244,13 @@ static __always_inline
 int task_state_match(struct task_struct *p, unsigned int state)
 {
 #ifdef CONFIG_PREEMPT_RT
-	int match;
-
 	/*
 	 * Serialize against current_save_and_set_rtlock_wait_state() and
 	 * current_restore_rtlock_saved_state().
 	 */
-	raw_spin_lock_irq(&p->pi_lock);
-	match = __task_state_match(p, state);
-	raw_spin_unlock_irq(&p->pi_lock);
-
-	return match;
-#else
-	return __task_state_match(p, state);
+	guard(raw_spinlock_irq)(&p->pi_lock);
 #endif
+	return __task_state_match(p, state);
 }
 
 /*
@@ -2417,10 +2404,9 @@ void migrate_disable(void)
 		return;
 	}
 
-	preempt_disable();
+	guard(preempt)();
 	this_rq()->nr_pinned++;
 	p->migration_disabled = 1;
-	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2444,7 +2430,7 @@ void migrate_enable(void)
 	 * Ensure stop_task runs either before or after this, and that
 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
 	 */
-	preempt_disable();
+	guard(preempt)();
 	if (p->cpus_ptr != &p->cpus_mask)
 		__set_cpus_allowed_ptr(p, &ac);
 	/*
@@ -2455,7 +2441,6 @@ void migrate_enable(void)
 	barrier();
 	p->migration_disabled = 0;
 	this_rq()->nr_pinned--;
-	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(migrate_enable);
@@ -3516,13 +3501,11 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
  */
 void kick_process(struct task_struct *p)
 {
-	int cpu;
+	guard(preempt)();
+	int cpu = task_cpu(p);
 
-	preempt_disable();
-	cpu = task_cpu(p);
 	if ((cpu != smp_processor_id()) && task_curr(p))
 		smp_send_reschedule(cpu);
-	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
......@@ -6368,8 +6351,9 @@ static void sched_core_balance(struct rq *rq)
struct sched_domain *sd;
int cpu = cpu_of(rq);
preempt_disable();
rcu_read_lock();
guard(preempt)();
guard(rcu)();
raw_spin_rq_unlock_irq(rq);
for_each_domain(cpu, sd) {
if (need_resched())
@@ -6379,8 +6363,6 @@ static void sched_core_balance(struct rq *rq)
 			break;
 	}
 
 	raw_spin_rq_lock_irq(rq);
-	rcu_read_unlock();
-	preempt_enable();
 }
 
 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
@@ -8258,8 +8240,6 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 #ifdef CONFIG_SMP
 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
 {
-	int ret = 0;
-
 	/*
 	 * If the task isn't a deadline task or admission control is
 	 * disabled then we don't care about affinity changes.
@@ -8273,11 +8253,11 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
 	 * tasks allowed to run on all the CPUs in the task's
 	 * root_domain.
 	 */
-	rcu_read_lock();
+	guard(rcu)();
 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
-		ret = -EBUSY;
-	rcu_read_unlock();
-
-	return ret;
+		return -EBUSY;
+
+	return 0;
 }
 #endif
@@ -10509,11 +10489,9 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 	/* Propagate the effective uclamp value for the new group */
-	mutex_lock(&uclamp_mutex);
-	rcu_read_lock();
+	guard(mutex)(&uclamp_mutex);
+	guard(rcu)();
 	cpu_util_update_eff(css);
-	rcu_read_unlock();
-	mutex_unlock(&uclamp_mutex);
 #endif
 
 	return 0;
......@@ -10664,8 +10642,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
static_branch_enable(&sched_uclamp_used);
mutex_lock(&uclamp_mutex);
rcu_read_lock();
guard(mutex)(&uclamp_mutex);
guard(rcu)();
tg = css_tg(of_css(of));
if (tg->uclamp_req[clamp_id].value != req.util)
@@ -10680,9 +10658,6 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
 	/* Update effective clamps to track the most restrictive value */
 	cpu_util_update_eff(of_css(of));
 
-	rcu_read_unlock();
-	mutex_unlock(&uclamp_mutex);
-
 	return nbytes;
 }
@@ -10708,10 +10683,10 @@ static inline void cpu_uclamp_print(struct seq_file *sf,
 	u64 percent;
 	u32 rem;
 
-	rcu_read_lock();
+	scoped_guard (rcu) {
 		tg = css_tg(seq_css(sf));
 		util_clamp = tg->uclamp_req[clamp_id].value;
-	rcu_read_unlock();
+	}
 
 	if (util_clamp == SCHED_CAPACITY_SCALE) {
 		seq_puts(sf, "max\n");
@@ -11033,7 +11008,6 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
-	int ret;
 	struct cfs_schedulable_data data = {
 		.tg = tg,
 		.period = period,
@@ -11045,11 +11019,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 		do_div(data.quota, NSEC_PER_USEC);
 	}
 
-	rcu_read_lock();
-	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
-	rcu_read_unlock();
-
-	return ret;
+	guard(rcu)();
+	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
 }
 
 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
@@ -11654,14 +11625,12 @@ int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
 	 * are not the last task to be migrated from this cpu for this mm, so
 	 * there is no need to move src_cid to the destination cpu.
 	 */
-	rcu_read_lock();
+	guard(rcu)();
 	src_task = rcu_dereference(src_rq->curr);
 	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-		rcu_read_unlock();
 		t->last_mm_cid = -1;
 		return -1;
 	}
-	rcu_read_unlock();
 
 	return src_cid;
 }
@@ -11705,10 +11674,9 @@ int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
 	 * the lazy-put flag, this task will be responsible for transitioning
 	 * from lazy-put flag set to MM_CID_UNSET.
 	 */
-	rcu_read_lock();
+	scoped_guard (rcu) {
 		src_task = rcu_dereference(src_rq->curr);
 		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
-		rcu_read_unlock();
 			/*
 			 * We observed an active task for this mm, there is therefore
 			 * no point in moving this cid to the destination cpu.
@@ -11716,7 +11684,7 @@ int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
 			 */
 			t->last_mm_cid = -1;
 			return -1;
 		}
-	rcu_read_unlock();
+	}
 
 	/*
 	 * The src_cid is unused, so it can be unset.
@@ -11789,7 +11757,6 @@ static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *t;
-	unsigned long flags;
 	int cid, lazy_cid;
 
 	cid = READ_ONCE(pcpu_cid->cid);
@@ -11824,23 +11791,21 @@ static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_
 	 * the lazy-put flag, that task will be responsible for transitioning
 	 * from lazy-put flag set to MM_CID_UNSET.
 	 */
-	rcu_read_lock();
+	scoped_guard (rcu) {
 		t = rcu_dereference(rq->curr);
-	if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
-		rcu_read_unlock();
+		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
 			return;
 	}
-	rcu_read_unlock();
 
 	/*
 	 * The cid is unused, so it can be unset.
 	 * Disable interrupts to keep the window of cid ownership without rq
 	 * lock small.
 	 */
-	local_irq_save(flags);
+	scoped_guard (irqsave) {
 		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
 			__mm_cid_put(mm, cid);
-	local_irq_restore(flags);
+	}
 }
 
 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
@@ -11862,14 +11827,13 @@ static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
 	 * snapshot associated with this cid if an active task using the mm is
 	 * observed on this rq.
 	 */
-	rcu_read_lock();
+	scoped_guard (rcu) {
 		curr = rcu_dereference(rq->curr);
 		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
 			WRITE_ONCE(pcpu_cid->time, rq_clock);
-		rcu_read_unlock();
 			return;
 		}
-	rcu_read_unlock();
+	}
 
 	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
 		return;
@@ -11963,7 +11927,6 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 void sched_mm_cid_exit_signals(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
-	struct rq_flags rf;
 	struct rq *rq;
 
 	if (!mm)
@@ -11971,7 +11934,7 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 	preempt_disable();
 	rq = this_rq();
-	rq_lock_irqsave(rq, &rf);
+	guard(rq_lock_irqsave)(rq);
 	preempt_enable_no_resched();	/* holding spinlock */
 	WRITE_ONCE(t->mm_cid_active, 0);
 	/*
@@ -11981,13 +11944,11 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 	smp_mb();
 	mm_cid_put(mm);
 	t->last_mm_cid = t->mm_cid = -1;
-	rq_unlock_irqrestore(rq, &rf);
 }
 
 void sched_mm_cid_before_execve(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
-	struct rq_flags rf;
 	struct rq *rq;
 
 	if (!mm)
@@ -11995,7 +11956,7 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	preempt_disable();
 	rq = this_rq();
-	rq_lock_irqsave(rq, &rf);
+	guard(rq_lock_irqsave)(rq);
 	preempt_enable_no_resched();	/* holding spinlock */
 	WRITE_ONCE(t->mm_cid_active, 0);
 	/*
@@ -12005,13 +11966,11 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	smp_mb();
 	mm_cid_put(mm);
 	t->last_mm_cid = t->mm_cid = -1;
-	rq_unlock_irqrestore(rq, &rf);
 }
 
 void sched_mm_cid_after_execve(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
-	struct rq_flags rf;
 	struct rq *rq;
 
 	if (!mm)
@@ -12019,7 +11978,7 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 	preempt_disable();
 	rq = this_rq();
-	rq_lock_irqsave(rq, &rf);
+	scoped_guard (rq_lock_irqsave, rq) {
 		preempt_enable_no_resched();	/* holding spinlock */
 		WRITE_ONCE(t->mm_cid_active, 1);
 		/*
@@ -12028,7 +11987,7 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 		 */
 		smp_mb();
 		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
-	rq_unlock_irqrestore(rq, &rf);
+	}
 
 	rseq_set_notify_resume(t);
 }
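
Where the critical section does not run to the end of the function, the commit uses scoped_guard(), which covers only the attached block; returning from inside the block still runs the unlock, so the error-path unlock calls seen in the removed lines are no longer needed. A rough sketch of that shape, using a hypothetical struct item and the spinlock_irqsave guard class rather than code from this diff:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical example type, not from the kernel tree. */
struct item {
	spinlock_t lock;
	bool busy;
	int value;
};

static int item_claim(struct item *it)
{
	/*
	 * The guard covers only this block; the early return below still
	 * drops the lock.
	 */
	scoped_guard (spinlock_irqsave, &it->lock) {
		if (it->busy)
			return -EBUSY;
		it->busy = true;
	}

	/* Lock is already released here. */
	return it->value;
}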