Commit fa56b76f authored by Paul E. McKenney, committed by Linus Torvalds

[PATCH] RCU: use rcu_assign_pointer()

This patch uses the rcu_assign_pointer() API to eliminate a number of explicit
memory barriers from code using RCU.  This has been tested successfully on
i386 and ppc64.
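
For readers new to the primitive: rcu_assign_pointer(p, v) issues the write barrier and then performs the store, so a reader that sees the new pointer is guaranteed to also see the initialization performed before the call. A minimal sketch of the transformation applied throughout this patch; the macro body is paraphrased from this era's include/linux/rcupdate.h, and foo/gp are hypothetical names:

	/* Paraphrased era-appropriate definition: barrier, then store. */
	#define rcu_assign_pointer(p, v)	({ \
							smp_wmb(); \
							(p) = (v); \
						})

	/* Before: open-coded ordering (hypothetical foo/gp). */
	foo->a = 1;
	smp_wmb();
	gp = foo;

	/* After: identical ordering, self-documenting. */
	foo->a = 1;
	rcu_assign_pointer(gp, foo);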

Signed-off-by: <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 85a8f4f2
arch/x86_64/kernel/mce.c
@@ -361,8 +361,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
 	memset(mcelog.entry, 0, next * sizeof(struct mce));
 	mcelog.next = 0;
-	smp_wmb();
 	synchronize_kernel();

 	/* Collect entries that were still getting written before the synchronize. */
...
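
A note on the hunk above: the smp_wmb() can simply be dropped because synchronize_kernel() (renamed synchronize_rcu() in later kernels) waits until every reader that might still observe the pre-update state has finished, which is stronger than a mere write barrier at this point. A hedged sketch of the general update/wait/reclaim pattern, with hypothetical foo/gp/new names:

	struct foo *old = gp;		/* gp: hypothetical RCU-protected pointer */

	rcu_assign_pointer(gp, new);	/* publish the replacement */
	synchronize_kernel();		/* wait out all pre-existing readers */
	kfree(old);			/* no reader can still hold 'old' */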
include/linux/list.h
@@ -568,8 +568,6 @@ static inline void hlist_del_init(struct hlist_node *n)
 	}
 }

-#define hlist_del_rcu_init hlist_del_init
-
 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 {
 	struct hlist_node *first = h->first;
...
net/core/netfilter.c
@@ -751,10 +751,9 @@ int nf_log_register(int pf, nf_logfn *logfn)
 	/* Any setup of logging members must be done before
 	 * substituting pointer. */
-	smp_wmb();
 	spin_lock(&nf_log_lock);
 	if (!nf_logging[pf]) {
-		nf_logging[pf] = logfn;
+		rcu_assign_pointer(nf_logging[pf], logfn);
 		ret = 0;
 	}
 	spin_unlock(&nf_log_lock);
...
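
The reader side of this registration (not part of the patch) would pair the assignment with rcu_dereference() under rcu_read_lock(); a hedged sketch, with the call's arguments elided as hypothetical:

	nf_logfn *logfn;

	rcu_read_lock();
	logfn = rcu_dereference(nf_logging[pf]);	/* pairs with rcu_assign_pointer() */
	if (logfn)
		logfn(hooknum, skb, in, out, prefix);	/* hypothetical argument list */
	rcu_read_unlock();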
net/decnet/dn_route.c
@@ -287,10 +287,9 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
 			*rthp = rth->u.rt_next;
-			smp_wmb();
-			rth->u.rt_next = dn_rt_hash_table[hash].chain;
-			smp_wmb();
-			dn_rt_hash_table[hash].chain = rth;
+			rcu_assign_pointer(rth->u.rt_next,
+					   dn_rt_hash_table[hash].chain);
+			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

 			rth->u.dst.__use++;
 			dst_hold(&rth->u.dst);
...
@@ -304,10 +303,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 		rthp = &rth->u.rt_next;
 	}

-	smp_wmb();
-	rt->u.rt_next = dn_rt_hash_table[hash].chain;
-	smp_wmb();
-	dn_rt_hash_table[hash].chain = rt;
+	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

 	dst_hold(&rt->u.dst);
 	rt->u.dst.__use++;
...
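
The ordering in these two hunks is the classic RCU head-insertion pattern: the new node's forward link is written before the node itself is published, so a lockless reader either sees the old chain or the fully linked new node, never a half-initialized one. Strictly, only the second store needs the barrier (the node is not yet reachable during the first), so using rcu_assign_pointer() for both reads as documentation. A hedged, generic sketch with hypothetical new/head names:

	new->next = head->chain;		/* prepare the link; node not yet visible */
	rcu_assign_pointer(head->chain, new);	/* publish: barrier, then store */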
net/ipv4/devinet.c
@@ -158,8 +158,7 @@ struct in_device *inetdev_init(struct net_device *dev)
 	/* Account for reference dev->ip_ptr */
 	in_dev_hold(in_dev);
-	smp_wmb();
-	dev->ip_ptr = in_dev;
+	rcu_assign_pointer(dev->ip_ptr, in_dev);

 #ifdef CONFIG_SYSCTL
 	devinet_sysctl_register(in_dev, &in_dev->cnf);
...
net/ipv4/route.c
@@ -801,14 +801,13 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 	 * must be visible to another weakly ordered CPU before
 	 * the insertion at the start of the hash chain.
 	 */
-	smp_wmb();
-	rth->u.rt_next = rt_hash_table[hash].chain;
+	rcu_assign_pointer(rth->u.rt_next,
+			   rt_hash_table[hash].chain);
 	/*
 	 * Since lookup is lockfree, the update writes
 	 * must be ordered for consistency on SMP.
 	 */
-	smp_wmb();
-	rt_hash_table[hash].chain = rth;
+	rcu_assign_pointer(rt_hash_table[hash].chain, rth);

 	rth->u.dst.__use++;
 	dst_hold(&rth->u.dst);
...
net/sched/sch_api.c
@@ -453,10 +453,9 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	/* enqueue is accessed locklessly - make sure it's visible
 	 * before we set a netdevice's qdisc pointer to sch */
-	smp_wmb();
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
 		qdisc_lock_tree(dev);
-		list_add_tail(&sch->list, &dev->qdisc_list);
+		list_add_tail_rcu(&sch->list, &dev->qdisc_list);
 		qdisc_unlock_tree(dev);
 #ifdef CONFIG_NET_ESTIMATOR
...
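
list_add_tail_rcu() is the list-level analogue of rcu_assign_pointer(): it publishes the new entry with the required barrier so the list can be walked locklessly. A hedged sketch of what a reader of dev->qdisc_list might look like, assuming an RCU-protected traversal and a hypothetical handle lookup:

	struct Qdisc *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &dev->qdisc_list, list) {
		if (q->handle == handle)	/* hypothetical key; use q only inside the lock */
			break;
	}
	rcu_read_unlock();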