Commit 3382290e authored by Will Deacon, committed by Ingo Molnar

locking/barriers: Convert users of lockless_dereference() to READ_ONCE()

[ Note, this is a Git cherry-pick of the following commit:

    506458ef ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()")

  ... for easier x86 PTI code testing and back-porting. ]

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.
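
For readers unfamiliar with the pattern being converted, the sketch below shows
roughly what these call sites rely on: a writer publishes a pointer with
smp_store_release() and a reader picks it up with a dependency-ordered load.
It is only an illustration of the idiom, not code from this patch; struct foo,
publish_foo() and read_foo() are made-up names, while READ_ONCE(),
smp_store_release() and the old lockless_dereference() are the real interfaces.

/*
 * Minimal sketch of the publish/consume pattern touched by this patch.
 * 'struct foo', 'publish_foo()' and 'read_foo()' are hypothetical names
 * used only for illustration.
 */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* smp_store_release() */

struct foo {
	int a;
};

static struct foo *global_foo;

/* Writer: initialise the object, then publish the pointer. */
static void publish_foo(struct foo *f)
{
	f->a = 1;
	/* Orders the initialisation before the pointer becomes visible. */
	smp_store_release(&global_foo, f);
}

/*
 * Reader: the address dependency from the loaded pointer to the
 * dereference orders the two loads, so READ_ONCE() (which now implies
 * smp_read_barrier_depends()) is enough on its own; no separate barrier
 * and no lockless_dereference() wrapper is needed.
 */
static int read_foo(void)
{
	struct foo *f = READ_ONCE(global_foo);	/* was lockless_dereference() */

	return f ? f->a : 0;
}
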
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2bc6608
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
 	struct ldt_struct *ldt;
 	/* IRQs are off, so this synchronizes with smp_store_release */
-	ldt = lockless_dereference(current->active_mm->context.ldt);
+	ldt = READ_ONCE(current->active_mm->context.ldt);
 	if (!ldt || idx >= ldt->nr_entries)
 		return 0;
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
-	/* lockless_dereference synchronizes with smp_store_release */
-	ldt = lockless_dereference(mm->context.ldt);
+	/* READ_ONCE synchronizes with smp_store_release */
+	ldt = READ_ONCE(mm->context.ldt);
 	/*
 	 * Any change to mm->context.ldt is followed by an IPI to all
@@ -103,7 +103,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
 			struct ldt_struct *ldt)
 {
-	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	/* Synchronizes with READ_ONCE in load_mm_ldt. */
 	smp_store_release(&current_mm->context.ldt, ldt);
 	/* Activate the LDT for all CPUs using current_mm. */
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
 	pgpath = path_to_pgpath(path);
-	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 		/* Only update current_pgpath if pg changed */
 		spin_lock_irqsave(&m->lock, flags);
 		m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	}
 	/* Were we instructed to switch PG? */
-	if (lockless_dereference(m->next_pg)) {
+	if (READ_ONCE(m->next_pg)) {
 		spin_lock_irqsave(&m->lock, flags);
 		pg = m->next_pg;
 		if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	/* Don't change PG until it has no remaining paths */
 check_current_pg:
-	pg = lockless_dereference(m->current_pg);
+	pg = READ_ONCE(m->current_pg);
 	if (pg) {
 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 		if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct request *clone;
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	bool queue_io;
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	struct pgpath *current_pgpath;
 	int r;
-	current_pgpath = lockless_dereference(m->current_pgpath);
+	current_pgpath = READ_ONCE(m->current_pgpath);
 	if (!current_pgpath)
 		current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	}
 	if (r == -ENOTCONN) {
-		if (!lockless_dereference(m->current_pg)) {
+		if (!READ_ONCE(m->current_pg)) {
 			/* Path status changed, redo selection */
 			(void) choose_pgpath(m, 0);
 		}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
 		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
 	/* Guess which priority_group will be used at next mapping time */
-	pg = lockless_dereference(m->current_pg);
-	next_pg = lockless_dereference(m->next_pg);
-	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+	pg = READ_ONCE(m->current_pg);
+	next_pg = READ_ONCE(m->next_pg);
+	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
 		pg = next_pg;
 	if (!pg) {
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
 	/*
 	 * Be careful about RCU walk racing with rename:
-	 * use 'lockless_dereference' to fetch the name pointer.
+	 * use 'READ_ONCE' to fetch the name pointer.
 	 *
 	 * NOTE! Even if a rename will mean that the length
 	 * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 	 * early because the data cannot match (there can
 	 * be no NUL in the ct/tcount data)
 	 */
-	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 	return dentry_string_cmp(cs, ct, tcount);
 }
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-	return lockless_dereference(oi->__upperdentry);
+	return READ_ONCE(oi->__upperdentry);
 }
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
-		realfile = lockless_dereference(od->upperfile);
+		realfile = READ_ONCE(od->upperfile);
 		if (!realfile) {
 			struct path upperpath;
@@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(lockless_dereference(ptr), type, member)
+	container_of(READ_ONCE(ptr), type, member)
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(p) ________p1 = lockless_dereference(p); \
+	typeof(p) ________p1 = READ_ONCE(p); \
 	((typeof(*p) __force __kernel *)(________p1)); \
 })
@@ -4233,7 +4233,7 @@ static void perf_remove_from_owner(struct perf_event *event)
 	 * indeed free this event, otherwise we need to serialize on
 	 * owner->perf_event_mutex.
 	 */
-	owner = lockless_dereference(event->owner);
+	owner = READ_ONCE(event->owner);
 	if (owner) {
 		/*
 		 * Since delayed_put_task_struct() also drops the last
@@ -4330,7 +4330,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	 * Cannot change, child events are not migrated, see the
 	 * comment with perf_event_ctx_lock_nested().
 	 */
-	ctx = lockless_dereference(child->ctx);
+	ctx = READ_ONCE(child->ctx);
 	/*
 	 * Since child_mutex nests inside ctx::mutex, we must jump
 	 * through hoops. We start by grabbing a reference on the ctx.
@@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	u32 ret = SECCOMP_RET_ALLOW;
 	/* Make sure cross-thread synced filter points somewhere sane. */
 	struct seccomp_filter *f =
-			lockless_dereference(current->seccomp.filter);
+			READ_ONCE(current->seccomp.filter);
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (unlikely(WARN_ON(f == NULL)))
@@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = lockless_dereference(*pprev))) {
+	while ((work = READ_ONCE(*pprev))) {
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
@@ -259,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
 	 * memcg_caches issues a write barrier to match this (see
 	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(arr->entries[idx]);
+	cachep = READ_ONCE(arr->entries[idx]);
 	rcu_read_unlock();
 	return cachep;