Commit 5bc1d1b4 authored by wangweidong, committed by David S. Miller

sctp: remove macros sctp_bh_[un]lock_sock

bh_[un]lock_sock was redefined as sctp_bh_[un]lock_sock for user-space-friendly
code, which we haven't used in years, so remove the macros.
Signed-off-by: Wang Weidong <wangweidong1@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 048ed4b6
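For context, the two macros removed by this patch were plain one-to-one aliases for the generic socket bottom-half lock helpers, so every call site in the diff below is a mechanical rename with no behavioural change. The following is a minimal illustrative sketch (not part of this commit) of the pattern those call sites follow, using only the in-kernel bh_lock_sock()/bh_unlock_sock()/sock_owned_by_user() helpers from <net/sock.h>; the example_timer_event() name and the do_protocol_work() stub are hypothetical.

#include <net/sock.h>

/* Hypothetical stand-in for the real per-timer processing. */
static void do_protocol_work(struct sock *sk)
{
}

/* Same shape as the SCTP timer handlers touched by this patch:
 * take the BH lock, back off if a process context owns the socket,
 * otherwise do the work, then drop the lock.
 */
static void example_timer_event(struct sock *sk)
{
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket is busy in process context; the real handlers
		 * reschedule their timer here rather than spin or sleep.
		 */
		goto out_unlock;
	}

	do_protocol_work(sk);

out_unlock:
	bh_unlock_sock(sk);
}

Because the old names expanded directly to these helpers, the substitution can be verified by inspection: each removed sctp_bh_lock_sock()/sctp_bh_unlock_sock() call is replaced by an identically placed bh_lock_sock()/bh_unlock_sock() call.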
@@ -170,10 +170,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
  * Section: Macros, externs, and inlines
  */
 
-/* sock lock wrappers. */
-#define sctp_bh_lock_sock(sk)    bh_lock_sock(sk)
-#define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
-
 /* SCTP SNMP MIB stats handlers */
 #define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
 #define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
...
@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * bottom halves on this lock, but a user may be in the lock too,
 	 * so check if it is busy.
 	 */
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sk != rcvr->sk) {
 		/* Our cached sk is different from the rcvr->sk. This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
 		 * be doing something with the new socket. Switch our veiw
 		 * of the current sk.
 		 */
-		sctp_bh_unlock_sock(sk);
+		bh_unlock_sock(sk);
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 	}
 
 	if (sock_owned_by_user(sk)) {
 		if (sctp_add_backlog(sk, skb)) {
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 			sctp_chunk_free(chunk);
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		 */
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
 			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sctp_inq_push(inqueue, chunk);
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* If the chunk was backloged again, don't drop refs */
 	if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 		goto out;
 	}
 
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
@@ -542,7 +542,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 /* Common cleanup code for icmp/icmpv6 error handler. */
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
...
@@ -634,10 +634,10 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg)
 			/* ignore bound-specific endpoints */
 			if (!sctp_is_ep_boundall(sk))
 				continue;
-			sctp_bh_lock_sock(sk);
+			bh_lock_sock(sk);
 			if (sctp_asconf_mgmt(sp, addrw) < 0)
 				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 free_next:
...
@@ -248,7 +248,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 
 	/* Check whether a task is in the sock. */
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
@@ -275,7 +275,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
@@ -288,7 +288,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 	struct net *net = sock_net(asoc->base.sk);
 	int error = 0;
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
@@ -315,7 +315,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
@@ -367,7 +367,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
@@ -392,7 +392,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
@@ -405,7 +405,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
@@ -427,7 +427,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 		      asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
...
@@ -1511,7 +1511,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	 * the net layers still may.
 	 */
 	local_bh_disable();
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
 	 * and we have just a little more cleanup.
@@ -1519,7 +1519,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	sock_hold(sk);
 	sk_common_release(sk);
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	local_bh_enable();
 	sock_put(sk);
...