Commit f338bad8 authored by Andrew Morton, committed by David S. Miller

[NET]: Lock initializer unifying

From: Thomas Gleixner <tglx@linutronix.de>

To make spinlock/rwlock initialization consistent all over the kernel,
this patch converts explicit lock-initializers into spin_lock_init() and
rwlock_init() calls.

Currently, spinlocks and rwlocks are initialized in two different ways:

  lock = SPIN_LOCK_UNLOCKED
  spin_lock_init(&lock)
  
  rwlock = RW_LOCK_UNLOCKED
  rwlock_init(&rwlock)

This patch converts all explicit lock initializations to spin_lock_init() or
rwlock_init() calls. (Besides consistency, this also helps automatic lock
validators and debugging code.)

The conversion was done with a script, verified manually, and reviewed,
compiled, and tested as far as possible on x86, ARM, and PPC.

There is no runtime overhead or actual code change resulting from this
patch, because spin_lock_init() and rwlock_init() are macros and are thus
equivalent to the explicit initialization method.
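
For illustration, here is a minimal sketch of why the two forms end up
equivalent. This assumes the plain (non-debug) macro definitions of that
era; the exact definitions live in include/linux/spinlock.h and vary with
the SMP/debug configuration:

  /* sketch only -- not the literal kernel definitions */
  #define spin_lock_init(lock)  do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
  #define rwlock_init(lock)     do { *(lock) = RW_LOCK_UNLOCKED; } while (0)

  spinlock_t lock;
  lock = SPIN_LOCK_UNLOCKED;    /* old, explicit initializer */
  spin_lock_init(&lock);        /* new, unified form */

Both forms store the same unlocked initializer into the lock, so the
generated code does not change; the init-call form simply gives lock
validators and debug code a single place to hook.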

This is the second batch of the unifying patches.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8452fcd1
@@ -267,8 +267,8 @@ static struct mpoa_client *alloc_mpc(void)
 	if (mpc == NULL)
 		return NULL;
 	memset(mpc, 0, sizeof(struct mpoa_client));
-	mpc->ingress_lock = RW_LOCK_UNLOCKED;
-	mpc->egress_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&mpc->ingress_lock);
+	rwlock_init(&mpc->egress_lock);
 	mpc->next = mpcs;
 	atm_mpoa_init_cache(mpc);

@@ -147,9 +147,9 @@ static struct net_device *new_bridge_dev(const char *name)
 	br = netdev_priv(dev);
 	br->dev = dev;
-	br->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
-	br->hash_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&br->hash_lock);
 	br->bridge_id.prio[0] = 0x80;
 	br->bridge_id.prio[1] = 0x00;

@@ -1168,7 +1168,7 @@ int ebt_register_table(struct ebt_table *table)
 	}
 	table->private = newinfo;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	ret = down_interruptible(&ebt_mutex);
 	if (ret != 0)
 		goto free_chainstack;

@@ -271,7 +271,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 	memset(n, 0, tbl->entry_size);
 	skb_queue_head_init(&n->arp_queue);
-	n->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&n->lock);
 	n->updated = n->used = now;
 	n->nud_state = NUD_NONE;
 	n->output = neigh_blackhole;
@@ -1091,7 +1091,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
 		memset(hh, 0, sizeof(struct hh_cache));
-		hh->hh_lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&hh->hh_lock);
 		hh->hh_type = protocol;
 		atomic_set(&hh->hh_refcnt, 0);
 		hh->hh_next = NULL;
@@ -1367,7 +1367,7 @@ void neigh_table_init(struct neigh_table *tbl)
 	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	init_timer(&tbl->gc_timer);
 	tbl->gc_timer.data = (unsigned long)tbl;
 	tbl->gc_timer.function = neigh_periodic_timer;

@@ -1175,8 +1175,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	} else
 		sk->sk_sleep = NULL;
-	sk->sk_dst_lock = RW_LOCK_UNLOCKED;
-	sk->sk_callback_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&sk->sk_dst_lock);
+	rwlock_init(&sk->sk_callback_lock);
 	sk->sk_state_change = sock_def_wakeup;
 	sk->sk_data_ready = sock_def_readable;

@@ -1822,7 +1822,7 @@ void __init dn_route_init(void)
 	dn_rt_hash_mask--;
 	for(i = 0; i <= dn_rt_hash_mask; i++) {
-		dn_rt_hash_table[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dn_rt_hash_table[i].lock);
 		dn_rt_hash_table[i].chain = NULL;
 	}

@@ -1252,8 +1252,8 @@ void ip_mc_init_dev(struct in_device *in_dev)
 	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
-	in_dev->mc_list_lock = RW_LOCK_UNLOCKED;
-	in_dev->mc_tomb_lock = SPIN_LOCK_UNLOCKED;
+	rwlock_init(&in_dev->mc_list_lock);
+	spin_lock_init(&in_dev->mc_tomb_lock);
 }
 /* Device going up */

@@ -373,7 +373,7 @@ static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph)
 	init_timer(&qp->timer);
 	qp->timer.data = (unsigned long) qp;	/* pointer to queue */
 	qp->timer.function = ip_expire;		/* expire function */
-	qp->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&qp->lock);
 	atomic_set(&qp->refcnt, 1);
 	return ip_frag_intern(hash, qp);

@@ -580,7 +580,7 @@ ip_vs_conn_new(int proto, __u32 caddr, __u16 cport, __u32 vaddr, __u16 vport,
 	cp->daddr = daddr;
 	cp->dport = dport;
 	cp->flags = flags;
-	cp->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&cp->lock);
 	/*
 	 * Set the entry is referenced by the current thread before hashing
@@ -894,7 +894,7 @@ int ip_vs_conn_init(void)
 	}
 	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
-		__ip_vs_conntbl_lock_array[idx].l = RW_LOCK_UNLOCKED;
+		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
 	}
 	proc_net_fops_create("ip_vs_conn", 0, &ip_vs_conn_fops);

@@ -746,8 +746,8 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
 	atomic_set(&dest->refcnt, 0);
 	INIT_LIST_HEAD(&dest->d_list);
-	dest->dst_lock = SPIN_LOCK_UNLOCKED;
-	dest->stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&dest->dst_lock);
+	spin_lock_init(&dest->stats.lock);
 	__ip_vs_update_dest(svc, dest, udest);
 	ip_vs_new_estimator(&dest->stats);
@@ -1062,8 +1062,8 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
 	svc->netmask = u->netmask;
 	INIT_LIST_HEAD(&svc->destinations);
-	svc->sched_lock = RW_LOCK_UNLOCKED;
-	svc->stats.lock = SPIN_LOCK_UNLOCKED;
+	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->stats.lock);
 	/* Bind the scheduler */
 	ret = ip_vs_bind_scheduler(svc, sched);
@@ -2357,7 +2357,7 @@ int ip_vs_control_init(void)
 	}
 	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	ip_vs_stats.lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&ip_vs_stats.lock);
 	ip_vs_new_estimator(&ip_vs_stats);
 	/* Hook the defense timer */

@@ -409,7 +409,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;

@@ -362,7 +362,7 @@ static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__u32 daddr)
 	/* initilize its dest set */
 	atomic_set(&(en->set.size), 0);
 	en->set.list = NULL;
-	en->set.lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&en->set.lock);
 	return en;
 }
@@ -659,7 +659,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		INIT_LIST_HEAD(&tbl->bucket[i]);
 	}
-	tbl->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tbl->lock);
 	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;

@@ -1199,7 +1199,7 @@ int arpt_register_table(struct arpt_table *table)
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&arpt_tables, table);
 unlock:

@@ -1430,7 +1430,7 @@ int ipt_register_table(struct ipt_table *table)
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&ipt_tables, table);
 unlock:

@@ -214,7 +214,7 @@ static int htable_create(struct ipt_hashlimit_info *minfo)
 	atomic_set(&hinfo->count, 0);
 	atomic_set(&hinfo->use, 1);
 	hinfo->rnd = 0;
-	hinfo->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&hinfo->lock);
 	hinfo->pde = create_proc_entry(minfo->name, 0, hashlimit_procdir);
 	if (!hinfo->pde) {
 		vfree(hinfo);

@@ -715,7 +715,7 @@ checkentry(const char *tablename,
 	curr_table = vmalloc(sizeof(struct recent_ip_tables));
 	if(curr_table == NULL) return -ENOMEM;
-	curr_table->list_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&curr_table->list_lock);
 	curr_table->next = NULL;
 	curr_table->count = 1;
 	curr_table->time_pos = 0;

@@ -2755,7 +2755,7 @@ int __init ip_rt_init(void)
 	rt_hash_mask--;
 	for (i = 0; i <= rt_hash_mask; i++) {
-		rt_hash_table[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&rt_hash_table[i].lock);
 		rt_hash_table[i].chain = NULL;
 	}

@@ -467,7 +467,7 @@ int tcp_listen_start(struct sock *sk)
 	sk->sk_max_ack_backlog = 0;
 	sk->sk_ack_backlog = 0;
 	tp->accept_queue = tp->accept_queue_tail = NULL;
-	tp->syn_wait_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&tp->syn_wait_lock);
 	tcp_delack_init(tp);
 	lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
@@ -2309,7 +2309,7 @@ void __init tcp_init(void)
 	if (!tcp_ehash)
 		panic("Failed to allocate TCP established hash table\n");
 	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&tcp_ehash[i].lock);
 		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
 	}
@@ -2325,7 +2325,7 @@ void __init tcp_init(void)
 	if (!tcp_bhash)
 		panic("Failed to allocate TCP bind hash table\n");
 	for (i = 0; i < tcp_bhash_size; i++) {
-		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&tcp_bhash[i].lock);
 		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
 	}

@@ -706,7 +706,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 	sock_lock_init(newsk);
 	bh_lock_sock(newsk);
-	newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&newsk->sk_dst_lock);
 	atomic_set(&newsk->sk_rmem_alloc, 0);
 	skb_queue_head_init(&newsk->sk_receive_queue);
 	atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -719,7 +719,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 	newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 	newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
 	newsk->sk_send_head = NULL;
-	newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&newsk->sk_callback_lock);
 	skb_queue_head_init(&newsk->sk_error_queue);
 	newsk->sk_write_space = sk_stream_write_space;

@@ -326,7 +326,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
 	if (ndev) {
 		memset(ndev, 0, sizeof(struct inet6_dev));
-		ndev->lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&ndev->lock);
 		ndev->dev = dev;
 		memcpy(&ndev->cnf, &ipv6_devconf_dflt, sizeof(ndev->cnf));
 		ndev->cnf.mtu6 = dev->mtu;

@@ -354,7 +354,7 @@ int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr)
 	/* aca_tstamp should be updated upon changes */
 	aca->aca_cstamp = aca->aca_tstamp = jiffies;
 	atomic_set(&aca->aca_refcnt, 2);
-	aca->aca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&aca->aca_lock);
 	aca->aca_next = idev->ac_list;
 	idev->ac_list = aca;

@@ -709,7 +709,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 		return;
 	memset(pmc, 0, sizeof(*pmc));
 	spin_lock_bh(&im->mca_lock);
-	pmc->mca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&pmc->mca_lock);
 	pmc->idev = im->idev;
 	in6_dev_hold(idev);
 	pmc->mca_addr = im->mca_addr;
@@ -849,7 +849,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
 	/* mca_stamp should be updated upon changes */
 	mc->mca_cstamp = mc->mca_tstamp = jiffies;
 	atomic_set(&mc->mca_refcnt, 2);
-	mc->mca_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&mc->mca_lock);
 	/* initial mode is (EX, empty) */
 	mc->mca_sfmode = MCAST_EXCLUDE;
@@ -2072,7 +2072,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
 	struct in6_addr maddr;
 	write_lock_bh(&idev->lock);
-	idev->mc_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&idev->mc_lock);
 	idev->mc_gq_running = 0;
 	init_timer(&idev->mc_gq_timer);
 	idev->mc_gq_timer.data = (unsigned long) idev;

@@ -1510,7 +1510,7 @@ int ip6t_register_table(struct ip6t_table *table)
 	/* save number of initial entries */
 	table->private->initial_entries = table->private->number;
-	table->lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&table->lock);
 	list_prepend(&ip6t_tables, table);
 unlock:

@@ -387,7 +387,7 @@ ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr
 	init_timer(&fq->timer);
 	fq->timer.function = ip6_frag_expire;
 	fq->timer.data = (long) fq;
-	fq->lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&fq->lock);
 	atomic_set(&fq->refcnt, 1);
 	return ip6_frag_intern(hash, fq);

@@ -186,7 +186,7 @@ static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax2
 	nr_node->which = 0;
 	nr_node->count = 1;
 	atomic_set(&nr_node->refcount, 1);
-	nr_node->node_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&nr_node->node_lock);
 	nr_node->routes[0].quality = quality;
 	nr_node->routes[0].obs_count = obs_count;

@@ -125,7 +125,7 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
 	/* Initialize the bind addr area. */
 	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
-	asoc->base.addr_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&asoc->base.addr_lock);
 	asoc->state = SCTP_STATE_CLOSED;

@@ -113,7 +113,7 @@ struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
-	ep->base.addr_lock = RW_LOCK_UNLOCKED;
+	rwlock_init(&ep->base.addr_lock);
 	/* Remember who we are attached to. */
 	ep->base.sk = sk;

@@ -1084,7 +1084,7 @@ __init int sctp_init(void)
 		goto err_ahash_alloc;
 	}
 	for (i = 0; i < sctp_assoc_hashsize; i++) {
-		sctp_assoc_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&sctp_assoc_hashtable[i].lock);
 		sctp_assoc_hashtable[i].chain = NULL;
 	}
@@ -1098,7 +1098,7 @@ __init int sctp_init(void)
 		goto err_ehash_alloc;
 	}
 	for (i = 0; i < sctp_ep_hashsize; i++) {
-		sctp_ep_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&sctp_ep_hashtable[i].lock);
 		sctp_ep_hashtable[i].chain = NULL;
 	}
@@ -1117,11 +1117,11 @@ __init int sctp_init(void)
 		goto err_bhash_alloc;
 	}
 	for (i = 0; i < sctp_port_hashsize; i++) {
-		sctp_port_hashtable[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&sctp_port_hashtable[i].lock);
 		sctp_port_hashtable[i].chain = NULL;
 	}
-	sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&sctp_port_alloc_lock);
 	sctp_port_rover = sysctl_local_port_range[0] - 1;
 	printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1152,7 +1152,7 @@ __init int sctp_init(void)
 	/* Initialize the local address list. */
 	INIT_LIST_HEAD(&sctp_local_addr_list);
-	sctp_local_addr_lock = SPIN_LOCK_UNLOCKED;
+	spin_lock_init(&sctp_local_addr_lock);
 	/* Register notifier for inet address additions/deletions. */
 	register_inetaddr_notifier(&sctp_inetaddr_notifier);

@@ -227,7 +227,7 @@ struct xfrm_policy *xfrm_policy_alloc(int gfp)
 	if (policy) {
 		memset(policy, 0, sizeof(struct xfrm_policy));
 		atomic_set(&policy->refcnt, 1);
-		policy->lock = RW_LOCK_UNLOCKED;
+		rwlock_init(&policy->lock);
 		init_timer(&policy->timer);
 		policy->timer.data = (unsigned long)policy;
 		policy->timer.function = xfrm_policy_timer;

@@ -186,7 +186,7 @@ struct xfrm_state *xfrm_state_alloc(void)
 		x->lft.soft_packet_limit = XFRM_INF;
 		x->lft.hard_byte_limit = XFRM_INF;
 		x->lft.hard_packet_limit = XFRM_INF;
-		x->lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&x->lock);
 	}
 	return x;
 }