Commit 316c1608 authored by Jason Low, committed by Ingo Molnar

sched, timer: Convert usages of ACCESS_ONCE() in the scheduler to READ_ONCE()/WRITE_ONCE()

ACCESS_ONCE() doesn't work reliably on non-scalar types. This patch removes
the rest of the existing usages of ACCESS_ONCE() in the scheduler and uses
the new READ_ONCE() and WRITE_ONCE() APIs as appropriate.
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Waiman Long <Waiman.Long@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1430251224-5764-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ce2f5fe4
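For background only (not part of this patch): ACCESS_ONCE() works by casting the location to a volatile-qualified version of its own type, a trick that is only reliable for scalar types, which is why the tree is moving to READ_ONCE()/WRITE_ONCE(). The sketch below uses simplified stand-in macros to illustrate the access pattern the patch adopts; the real definitions live in include/linux/compiler.h, and the variable and function names here are hypothetical.

        /* Simplified, illustrative stand-ins -- not the kernel's actual macros. */
        #define READ_ONCE(x)            (*(const volatile typeof(x) *)&(x))
        #define WRITE_ONCE(x, val)      (*(volatile typeof(x) *)&(x) = (val))

        static int numa_scan_seq;       /* hypothetical shared counter */

        static void reset_scan(void)
        {
                /*
                 * Replaces the old ACCESS_ONCE(numa_scan_seq)++ idiom: the read
                 * and the write each happen exactly once, though the update as a
                 * whole is still not atomic (single-writer only, as in
                 * reset_ptenuma_scan() below).
                 */
                WRITE_ONCE(numa_scan_seq, READ_ONCE(numa_scan_seq) + 1);
        }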
@@ -3085,13 +3085,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
                 unsigned int limit)
 {
-        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+        return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
 }
 
 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                 unsigned int limit)
 {
-        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+        return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
 }
 
 static inline unsigned long rlimit(unsigned int limit)
...
@@ -1094,7 +1094,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
         /* Thread group counters. */
         thread_group_cputime_init(sig);
 
-        cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+        cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
         if (cpu_limit != RLIM_INFINITY) {
                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
                 sig->cputimer.running = 1;
...
@@ -139,7 +139,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
         p->signal->autogroup = autogroup_kref_get(ag);
 
-        if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+        if (!READ_ONCE(sysctl_sched_autogroup_enabled))
                 goto out;
 
         for_each_thread(p, t)
...
@@ -29,7 +29,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+        int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
 
         if (enabled && task_wants_autogroup(p, tg))
                 return p->signal->autogroup->tg;
...
@@ -511,7 +511,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
         struct thread_info *ti = task_thread_info(p);
-        typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+        typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
         for (;;) {
                 if (!(val & _TIF_POLLING_NRFLAG))
@@ -2526,7 +2526,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
         struct rq *rq = this_rq();
-        unsigned long next, now = ACCESS_ONCE(jiffies);
+        unsigned long next, now = READ_ONCE(jiffies);
 
         next = rq->last_sched_tick + HZ;
...
@@ -567,7 +567,7 @@ static void cputime_advance(cputime_t *counter, cputime_t new)
 {
         cputime_t old;
 
-        while (new > (old = ACCESS_ONCE(*counter)))
+        while (new > (old = READ_ONCE(*counter)))
                 cmpxchg_cputime(counter, old, new);
 }
...
@@ -995,7 +995,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
         rq = cpu_rq(cpu);
 
         rcu_read_lock();
-        curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+        curr = READ_ONCE(rq->curr); /* unlocked access */
 
         /*
          * If we are dealing with a -deadline task, we must
...
@@ -834,7 +834,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
-        unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
+        unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
         unsigned int scan, floor;
         unsigned int windows = 1;
 
@@ -1794,7 +1794,7 @@ static void task_numa_placement(struct task_struct *p)
         u64 runtime, period;
         spinlock_t *group_lock = NULL;
 
-        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+        seq = READ_ONCE(p->mm->numa_scan_seq);
         if (p->numa_scan_seq == seq)
                 return;
         p->numa_scan_seq = seq;
@@ -1938,7 +1938,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
         }
 
         rcu_read_lock();
-        tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+        tsk = READ_ONCE(cpu_rq(cpu)->curr);
 
         if (!cpupid_match_pid(tsk, cpupid))
                 goto no_join;
@@ -2107,7 +2107,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-        ACCESS_ONCE(p->mm->numa_scan_seq)++;
+        WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
         p->mm->numa_scan_offset = 0;
 }
 
@@ -4451,7 +4451,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
  */
 static void update_idle_cpu_load(struct rq *this_rq)
 {
-        unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+        unsigned long curr_jiffies = READ_ONCE(jiffies);
         unsigned long load = this_rq->cfs.runnable_load_avg;
         unsigned long pending_updates;
 
@@ -4473,7 +4473,7 @@ static void update_idle_cpu_load(struct rq *this_rq)
 void update_cpu_load_nohz(void)
 {
         struct rq *this_rq = this_rq();
-        unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+        unsigned long curr_jiffies = READ_ONCE(jiffies);
         unsigned long pending_updates;
 
         if (curr_jiffies == this_rq->last_load_update_tick)
@@ -4558,7 +4558,7 @@ static unsigned long capacity_orig_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
-        unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
+        unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
         unsigned long load_avg = rq->cfs.runnable_load_avg;
 
         if (nr_running)
@@ -6220,8 +6220,8 @@ static unsigned long scale_rt_capacity(int cpu)
          * Since we're reading these variables without serialization make sure
          * we read them once before doing sanity checks on them.
          */
-        age_stamp = ACCESS_ONCE(rq->age_stamp);
-        avg = ACCESS_ONCE(rq->rt_avg);
+        age_stamp = READ_ONCE(rq->age_stamp);
+        avg = READ_ONCE(rq->rt_avg);
         delta = __rq_clock_broken(rq) - age_stamp;
 
         if (unlikely(delta < 0))
...
@@ -1323,7 +1323,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
         rq = cpu_rq(cpu);
 
         rcu_read_lock();
-        curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+        curr = READ_ONCE(rq->curr); /* unlocked access */
 
         /*
          * If the current task on @p's runqueue is an RT task, then
...
@@ -713,7 +713,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
-        return ACCESS_ONCE(rq->clock);
+        return READ_ONCE(rq->clock);
 }
 
 static inline u64 rq_clock(struct rq *rq)
...
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
-        unsigned long now = ACCESS_ONCE(jiffies);
+        unsigned long now = READ_ONCE(jiffies);
         if (signal_pending_state(current->state, current))
                 return 1;
         if (time_after_eq(now, word->timeout))
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
-        unsigned long now = ACCESS_ONCE(jiffies);
+        unsigned long now = READ_ONCE(jiffies);
         if (signal_pending_state(current->state, current))
                 return 1;
         if (time_after_eq(now, word->timeout))
...
@@ -852,10 +852,10 @@ static void check_thread_timers(struct task_struct *tsk,
         /*
          * Check for the special case thread timers.
          */
-        soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+        soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
         if (soft != RLIM_INFINITY) {
                 unsigned long hard =
-                        ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+                        READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
                 if (hard != RLIM_INFINITY &&
                     tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -958,11 +958,11 @@ static void check_process_timers(struct task_struct *tsk,
                          SIGPROF);
         check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                          SIGVTALRM);
-        soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+        soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
         if (soft != RLIM_INFINITY) {
                 unsigned long psecs = cputime_to_secs(ptime);
                 unsigned long hard =
-                        ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+                        READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                 cputime_t x;
                 if (psecs >= hard) {
                         /*
...