Commit 656edac6 authored by David S. Miller

Merge branch 'sctp'

Wang Weidong says:

====================
sctp: remove some macro locking wrappers

In sctp.h there are some macro locking wrappers. As Neil pointed out:

"Its because in the origional implementation of the sctp protocol, there was a
user space test harness which built the kernel module for userspace execution to
cary our some unit testing on the code.  It did so by redefining some of those
locking macros to user space friendly code.  IIRC we haven't use those unit
tests in years, and so should be removing them, not adding them to other
locations."

So I remove them.
====================
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d08f161a 5bc1d1b4
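
For context on why the wrappers existed at all: in the kernel build each macro expanded 1:1 to the core primitive (sctp_lock_sock(sk) became lock_sock(sk), sctp_spin_lock(lock) became spin_lock(lock), and so on), so a userspace harness only had to redefine the sctp_* names to get locking it could run outside the kernel. Below is a minimal, compilable sketch of that pattern; the KERNEL_BUILD guard, the stand-in struct sock, and the pthread mapping are illustrative assumptions, not the historical harness code. With the harness long unused, the extra naming layer buys nothing, which is why the series simply substitutes the core primitives at every call site, as the diff below shows.

```c
/*
 * Sketch only -- not code from this commit.  It illustrates the pattern the
 * wrappers enabled: one macro name, two possible expansions.  KERNEL_BUILD,
 * the fake struct sock, and the pthread mapping are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct sock { pthread_mutex_t m; };	/* stand-in for the kernel's struct sock */

#ifdef KERNEL_BUILD			/* hypothetical: in-kernel expansion */
#define sctp_lock_sock(sk)	lock_sock(sk)
#define sctp_release_sock(sk)	release_sock(sk)
#else					/* hypothetical: userspace test harness */
#define sctp_lock_sock(sk)	pthread_mutex_lock(&(sk)->m)
#define sctp_release_sock(sk)	pthread_mutex_unlock(&(sk)->m)
#endif

int main(void)
{
	struct sock sk = { .m = PTHREAD_MUTEX_INITIALIZER };

	sctp_lock_sock(&sk);		/* expands to a pthread lock here */
	printf("critical section under the wrapper lock\n");
	sctp_release_sock(&sk);
	return 0;
}
```
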
@@ -713,11 +713,11 @@ static void process_sctp_notification(struct connection *con,
 			return;
 
 		/* Peel off a new sock */
-		sctp_lock_sock(con->sock->sk);
+		lock_sock(con->sock->sk);
 		ret = sctp_do_peeloff(con->sock->sk,
 				      sn->sn_assoc_change.sac_assoc_id,
 				      &new_con->sock);
-		sctp_release_sock(con->sock->sk);
+		release_sock(con->sock->sk);
 		if (ret < 0) {
 			log_print("Can't peel off a socket for "
 				  "connection %d to node %d: err=%d",
...
@@ -170,25 +170,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
  * Section: Macros, externs, and inlines
  */
 
-/* spin lock wrappers. */
-#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
-#define sctp_spin_unlock_irqrestore(lock, flags) \
-	spin_unlock_irqrestore(lock, flags)
-#define sctp_local_bh_disable() local_bh_disable()
-#define sctp_local_bh_enable() local_bh_enable()
-#define sctp_spin_lock(lock) spin_lock(lock)
-#define sctp_spin_unlock(lock) spin_unlock(lock)
-#define sctp_write_lock(lock) write_lock(lock)
-#define sctp_write_unlock(lock) write_unlock(lock)
-#define sctp_read_lock(lock) read_lock(lock)
-#define sctp_read_unlock(lock) read_unlock(lock)
-
-/* sock lock wrappers. */
-#define sctp_lock_sock(sk) lock_sock(sk)
-#define sctp_release_sock(sk) release_sock(sk)
-#define sctp_bh_lock_sock(sk) bh_lock_sock(sk)
-#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
-
 /* SCTP SNMP MIB stats handlers */
 #define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
 #define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
@@ -353,13 +334,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
 {
 	unsigned long flags;
 
-	sctp_spin_lock_irqsave(&head->lock, flags);
-	sctp_spin_lock(&list->lock);
+	spin_lock_irqsave(&head->lock, flags);
+	spin_lock(&list->lock);
 
 	skb_queue_splice_tail_init(list, head);
 
-	sctp_spin_unlock(&list->lock);
-	sctp_spin_unlock_irqrestore(&head->lock, flags);
+	spin_unlock(&list->lock);
+	spin_unlock_irqrestore(&head->lock, flags);
 }
 
 /**
...
@@ -368,9 +368,9 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 {
 	struct sctp_association *asoc;
 
-	sctp_local_bh_disable();
+	local_bh_disable();
 	asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
-	sctp_local_bh_enable();
+	local_bh_enable();
 
 	return asoc;
 }
...
@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * bottom halves on this lock, but a user may be in the lock too,
 	 * so check if it is busy.
 	 */
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sk != rcvr->sk) {
 		/* Our cached sk is different from the rcvr->sk. This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
 		 * be doing something with the new socket. Switch our veiw
 		 * of the current sk.
 		 */
-		sctp_bh_unlock_sock(sk);
+		bh_unlock_sock(sk);
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 	}
 
 	if (sock_owned_by_user(sk)) {
 		if (sctp_add_backlog(sk, skb)) {
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 			sctp_chunk_free(chunk);
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	 */
 	sk = rcvr->sk;
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
 		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sctp_inq_push(inqueue, chunk);
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* If the chunk was backloged again, don't drop refs */
 	if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 		goto out;
 	}
 
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
@@ -542,7 +542,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 /* Common cleanup code for icmp/icmpv6 error handler. */
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
@@ -718,17 +718,17 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 	head = &sctp_ep_hashtable[epb->hashent];
 
-	sctp_write_lock(&head->lock);
+	write_lock(&head->lock);
 	hlist_add_head(&epb->node, &head->chain);
-	sctp_write_unlock(&head->lock);
+	write_unlock(&head->lock);
 }
 
 /* Add an endpoint to the hash. Local BH-safe. */
 void sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
-	sctp_local_bh_disable();
+	local_bh_disable();
 	__sctp_hash_endpoint(ep);
-	sctp_local_bh_enable();
+	local_bh_enable();
 }
 
 /* Remove endpoint from the hash table. */
@@ -744,17 +744,17 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 	head = &sctp_ep_hashtable[epb->hashent];
 
-	sctp_write_lock(&head->lock);
+	write_lock(&head->lock);
 	hlist_del_init(&epb->node);
-	sctp_write_unlock(&head->lock);
+	write_unlock(&head->lock);
 }
 
 /* Remove endpoint from the hash. Local BH-safe. */
 void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
-	sctp_local_bh_disable();
+	local_bh_disable();
 	__sctp_unhash_endpoint(ep);
-	sctp_local_bh_enable();
+	local_bh_enable();
 }
 
 /* Look up an endpoint. */
@@ -798,9 +798,9 @@ static void __sctp_hash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
-	sctp_write_lock(&head->lock);
+	write_lock(&head->lock);
 	hlist_add_head(&epb->node, &head->chain);
-	sctp_write_unlock(&head->lock);
+	write_unlock(&head->lock);
 }
 
 /* Add an association to the hash. Local BH-safe. */
@@ -809,9 +809,9 @@ void sctp_hash_established(struct sctp_association *asoc)
 	if (asoc->temp)
 		return;
 
-	sctp_local_bh_disable();
+	local_bh_disable();
 	__sctp_hash_established(asoc);
-	sctp_local_bh_enable();
+	local_bh_enable();
 }
 
 /* Remove association from the hash table. */
@@ -828,9 +828,9 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
-	sctp_write_lock(&head->lock);
+	write_lock(&head->lock);
 	hlist_del_init(&epb->node);
-	sctp_write_unlock(&head->lock);
+	write_unlock(&head->lock);
 }
 
 /* Remove association from the hash table. Local BH-safe. */
@@ -839,9 +839,9 @@ void sctp_unhash_established(struct sctp_association *asoc)
 	if (asoc->temp)
 		return;
 
-	sctp_local_bh_disable();
+	local_bh_disable();
 	__sctp_unhash_established(asoc);
-	sctp_local_bh_enable();
+	local_bh_enable();
 }
 
 /* Look up an association. */
@@ -891,9 +891,9 @@ struct sctp_association *sctp_lookup_association(struct net *net,
 {
 	struct sctp_association *asoc;
 
-	sctp_local_bh_disable();
+	local_bh_disable();
 	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
-	sctp_local_bh_enable();
+	local_bh_enable();
 
 	return asoc;
 }
...
@@ -218,7 +218,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 		return -ENOMEM;
 
 	head = &sctp_ep_hashtable[hash];
-	sctp_local_bh_disable();
+	local_bh_disable();
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, &head->chain) {
 		ep = sctp_ep(epb);
@@ -235,7 +235,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "\n");
 	}
 	read_unlock(&head->lock);
-	sctp_local_bh_enable();
+	local_bh_enable();
 
 	return 0;
 }
@@ -326,7 +326,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		return -ENOMEM;
 
 	head = &sctp_assoc_hashtable[hash];
-	sctp_local_bh_disable();
+	local_bh_disable();
 	read_lock(&head->lock);
 	sctp_for_each_hentry(epb, &head->chain) {
 		assoc = sctp_assoc(epb);
@@ -362,7 +362,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "\n");
 	}
 	read_unlock(&head->lock);
-	sctp_local_bh_enable();
+	local_bh_enable();
 
 	return 0;
 }
@@ -446,7 +446,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 		return -ENOMEM;
 
 	head = &sctp_assoc_hashtable[hash];
-	sctp_local_bh_disable();
+	local_bh_disable();
 	read_lock(&head->lock);
 	rcu_read_lock();
 	sctp_for_each_hentry(epb, &head->chain) {
@@ -505,7 +505,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 	rcu_read_unlock();
 	read_unlock(&head->lock);
-	sctp_local_bh_enable();
+	local_bh_enable();
 
 	return 0;
...
@@ -634,10 +634,10 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg)
 			/* ignore bound-specific endpoints */
 			if (!sctp_is_ep_boundall(sk))
 				continue;
-			sctp_bh_lock_sock(sk);
+			bh_lock_sock(sk);
 			if (sctp_asconf_mgmt(sp, addrw) < 0)
 				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 free_next:
...
@@ -248,7 +248,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 
 	/* Check whether a task is in the sock. */
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -275,7 +275,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
 
@@ -288,7 +288,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 	struct net *net = sock_net(asoc->base.sk);
 	int error = 0;
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
@@ -315,7 +315,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
 
@@ -367,7 +367,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -392,7 +392,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
 
@@ -405,7 +405,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -427,7 +427,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 			   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
...
@@ -272,7 +272,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
 {
 	int retval = 0;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
 		 addr, addr_len);
@@ -284,7 +284,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
 	else
 		retval = -EINVAL;
 
-	sctp_release_sock(sk);
+	release_sock(sk);
 
 	return retval;
 }
@@ -1461,7 +1461,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 	sk->sk_state = SCTP_SS_CLOSING;
 
@@ -1505,13 +1505,13 @@ static void sctp_close(struct sock *sk, long timeout)
 	sctp_wait_for_close(sk, timeout);
 
 	/* This will run the backlog queue. */
-	sctp_release_sock(sk);
+	release_sock(sk);
 
 	/* Supposedly, no process has access to the socket, but
 	 * the net layers still may.
 	 */
-	sctp_local_bh_disable();
-	sctp_bh_lock_sock(sk);
+	local_bh_disable();
+	bh_lock_sock(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
 	 * and we have just a little more cleanup.
@@ -1519,8 +1519,8 @@ static void sctp_close(struct sock *sk, long timeout)
 	sock_hold(sk);
 	sk_common_release(sk);
 
-	sctp_bh_unlock_sock(sk);
-	sctp_local_bh_enable();
+	bh_unlock_sock(sk);
+	local_bh_enable();
 
 	sock_put(sk);
@@ -1665,7 +1665,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 	pr_debug("%s: about to look up association\n", __func__);
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	/* If a msg_name has been specified, assume this is to be used. */
 	if (msg_name) {
@@ -1949,7 +1949,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		sctp_association_free(asoc);
 	}
 
 out_unlock:
-	sctp_release_sock(sk);
+	release_sock(sk);
 
 out_nounlock:
 	return sctp_error(sk, msg_flags, err);
@@ -2035,7 +2035,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
 		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
 		 addr_len);
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
 		err = -ENOTCONN;
@@ -2119,7 +2119,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
 		sctp_ulpevent_free(event);
 	}
 
 out:
-	sctp_release_sock(sk);
+	release_sock(sk);
 	return err;
 }
@@ -3590,7 +3590,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 		goto out_nounlock;
 	}
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	switch (optname) {
 	case SCTP_SOCKOPT_BINDX_ADD:
@@ -3708,7 +3708,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 		break;
 	}
 
-	sctp_release_sock(sk);
+	release_sock(sk);
 
 out_nounlock:
 	return retval;
@@ -3736,7 +3736,7 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
 	int err = 0;
 	struct sctp_af *af;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
 		 addr, addr_len);
@@ -3752,7 +3752,7 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
 		err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
 	}
 
-	sctp_release_sock(sk);
+	release_sock(sk);
 	return err;
 }
@@ -3778,7 +3778,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 	long timeo;
 	int error = 0;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	sp = sctp_sk(sk);
 	ep = sp->ep;
@@ -3816,7 +3816,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 	sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
 
 out:
-	sctp_release_sock(sk);
+	release_sock(sk);
 	*err = error;
 	return newsk;
 }
@@ -3826,7 +3826,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
 	int rc = -ENOTCONN;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	/*
 	 * SEQPACKET-style sockets in LISTENING state are valid, for
@@ -3856,7 +3856,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		break;
 	}
 out:
-	sctp_release_sock(sk);
+	release_sock(sk);
 	return rc;
 }
@@ -5754,7 +5754,7 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 	if (get_user(len, optlen))
 		return -EFAULT;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	switch (optname) {
 	case SCTP_STATUS:
@@ -5878,7 +5878,7 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 		break;
 	}
 
-	sctp_release_sock(sk);
+	release_sock(sk);
 	return retval;
 }
@@ -5918,7 +5918,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 
 	pr_debug("%s: begins, snum:%d\n", __func__, snum);
 
-	sctp_local_bh_disable();
+	local_bh_disable();
 
 	if (snum == 0) {
 		/* Search for an available port. */
@@ -5937,14 +5937,14 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 				continue;
 			index = sctp_phashfn(sock_net(sk), rover);
 			head = &sctp_port_hashtable[index];
-			sctp_spin_lock(&head->lock);
+			spin_lock(&head->lock);
 			sctp_for_each_hentry(pp, &head->chain)
 				if ((pp->port == rover) &&
 				    net_eq(sock_net(sk), pp->net))
 					goto next;
 			break;
 		next:
-			sctp_spin_unlock(&head->lock);
+			spin_unlock(&head->lock);
 		} while (--remaining > 0);
 
 		/* Exhausted local port range during search? */
@@ -5965,7 +5965,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	 * port iterator, pp being NULL.
 	 */
 	head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
-	sctp_spin_lock(&head->lock);
+	spin_lock(&head->lock);
 	sctp_for_each_hentry(pp, &head->chain) {
 		if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
 			goto pp_found;
@@ -6049,10 +6049,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	ret = 0;
 
 fail_unlock:
-	sctp_spin_unlock(&head->lock);
+	spin_unlock(&head->lock);
 
 fail:
-	sctp_local_bh_enable();
+	local_bh_enable();
 	return ret;
 }
@@ -6144,7 +6144,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	if (unlikely(backlog < 0))
 		return err;
 
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 	/* Peeled-off sockets are not allowed to listen(). */
 	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
@@ -6177,7 +6177,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	err = 0;
 out:
-	sctp_release_sock(sk);
+	release_sock(sk);
 	return err;
 }
@@ -6286,20 +6286,20 @@ static inline void __sctp_put_port(struct sock *sk)
 						  inet_sk(sk)->inet_num)];
 	struct sctp_bind_bucket *pp;
 
-	sctp_spin_lock(&head->lock);
+	spin_lock(&head->lock);
 	pp = sctp_sk(sk)->bind_hash;
 	__sk_del_bind_node(sk);
 	sctp_sk(sk)->bind_hash = NULL;
 	inet_sk(sk)->inet_num = 0;
 	sctp_bucket_destroy(pp);
-	sctp_spin_unlock(&head->lock);
+	spin_unlock(&head->lock);
 }
 
 void sctp_put_port(struct sock *sk)
 {
-	sctp_local_bh_disable();
+	local_bh_disable();
 	__sctp_put_port(sk);
-	sctp_local_bh_enable();
+	local_bh_enable();
 }
 
 /*
@@ -6474,9 +6474,9 @@ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 	 * does not fit in the user's buffer, but this seems to be the
 	 * only way to honor MSG_DONTWAIT realistically.
 	 */
-	sctp_release_sock(sk);
+	release_sock(sk);
 	*timeo_p = schedule_timeout(*timeo_p);
-	sctp_lock_sock(sk);
+	lock_sock(sk);
 
 ready:
 	finish_wait(sk_sleep(sk), &wait);
@@ -6659,10 +6659,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		/* Let another process have a go. Since we are going
 		 * to sleep anyway.
 		 */
-		sctp_release_sock(sk);
+		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
 		BUG_ON(sk != asoc->base.sk);
-		sctp_lock_sock(sk);
+		lock_sock(sk);
 
 		*timeo_p = current_timeo;
 	}
@@ -6767,9 +6767,9 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
 		/* Let another process have a go. Since we are going
 		 * to sleep anyway.
 		 */
-		sctp_release_sock(sk);
+		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
-		sctp_lock_sock(sk);
+		lock_sock(sk);
 
 		*timeo_p = current_timeo;
 	}
@@ -6812,9 +6812,9 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
 					  TASK_INTERRUPTIBLE);
 
 		if (list_empty(&ep->asocs)) {
-			sctp_release_sock(sk);
+			release_sock(sk);
 			timeo = schedule_timeout(timeo);
-			sctp_lock_sock(sk);
+			lock_sock(sk);
 		}
 
 		err = -EINVAL;
@@ -6847,9 +6847,9 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (list_empty(&sctp_sk(sk)->ep->asocs))
 			break;
-		sctp_release_sock(sk);
+		release_sock(sk);
 		timeout = schedule_timeout(timeout);
-		sctp_lock_sock(sk);
+		lock_sock(sk);
 	} while (!signal_pending(current) && timeout);
 
 	finish_wait(sk_sleep(sk), &wait);
@@ -6950,14 +6950,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	/* Hook this new socket in to the bind_hash list. */
 	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
 						 inet_sk(oldsk)->inet_num)];
-	sctp_local_bh_disable();
-	sctp_spin_lock(&head->lock);
+	local_bh_disable();
+	spin_lock(&head->lock);
 	pp = sctp_sk(oldsk)->bind_hash;
 	sk_add_bind_node(newsk, &pp->owner);
 	sctp_sk(newsk)->bind_hash = pp;
 	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
-	sctp_spin_unlock(&head->lock);
-	sctp_local_bh_enable();
+	spin_unlock(&head->lock);
+	local_bh_enable();
 
 	/* Copy the bind_addr list from the original endpoint to the new
 	 * endpoint so that we can handle restarts properly
@@ -7046,7 +7046,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	newsk->sk_shutdown |= RCV_SHUTDOWN;
 
 	newsk->sk_state = SCTP_SS_ESTABLISHED;
-	sctp_release_sock(newsk);
+	release_sock(newsk);
 }
...