Commit 9ea88a15 authored by Dmitry Popov, committed by David S. Miller

tcp: md5: check md5 signature without socket lock

Since a8afca03 (tcp: md5: protects md5sig_info with RCU), tcp_md5_do_lookup
no longer requires the socket lock; rcu_read_lock is enough. Therefore the
socket lock is no longer required for tcp_v{4,6}_inbound_md5_hash either, so
these calls (wrapped with rcu_read_{,un}lock) can be moved ahead of
bh_lock_sock: from tcp_v{4,6}_do_rcv to tcp_v{4,6}_rcv.
Signed-off-by: Dmitry Popov <ixaphire@qrator.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 269f8cb2
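
For illustration, the resulting shape of the IPv4 receive path looks roughly
like the sketch below. This is a simplified sketch, not the literal kernel
code: the helper name and its signature are hypothetical (the real
tcp_v4_rcv() takes only the skb and looks the socket up itself), and the
filter/backlog handling is omitted. The point it shows is that the MD5 check
now runs under rcu_read_lock() only, and bh_lock_sock is taken solely for
segments that pass it.

/*
 * Simplified sketch (hypothetical helper name and signature): the MD5
 * check runs before the socket lock, protected only by RCU.
 */
static int tcp_v4_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/* rcu_read_lock()/unlock() are taken inside the wrapper around
	 * tcp_md5_do_lookup(); a bad or missing signature drops the
	 * segment here, without ever touching the socket lock. */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	bh_lock_sock_nested(sk);	/* locked only for segments that passed */
	if (!sock_owned_by_user(sk))
		tcp_v4_do_rcv(sk, skb);	/* no MD5 work left behind the lock */
	bh_unlock_sock(sk);

	sock_put(sk);
	return 0;

discard_and_relse:
	sock_put(sk);
	kfree_skb(skb);
	return 0;
}
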
@@ -1167,7 +1167,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
+				      const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1220,6 +1221,17 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	return false;
 }
 
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = __tcp_v4_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
@@ -1432,16 +1444,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sock *rsk;
-#ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * We really want to reject the packet as early as possible
-	 * if:
-	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-	 *  o There is an MD5 option and we're not expecting one
-	 */
-	if (tcp_v4_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		struct dst_entry *dst = sk->sk_rx_dst;
@@ -1644,6 +1646,18 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * We really want to reject the packet as early as possible
+	 * if:
+	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
+	 *  o There is an MD5 option and we're not expecting one
+	 */
+	if (tcp_v4_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	nf_reset(skb);
 
 	if (sk_filter(sk, skb))
...
@@ -667,7 +667,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 	return 1;
 }
 
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static int __tcp_v6_inbound_md5_hash(struct sock *sk,
+				     const struct sk_buff *skb)
 {
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
@@ -707,6 +708,18 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	}
 	return 0;
 }
 
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	int ret;
+
+	rcu_read_lock();
+	ret = __tcp_v6_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -1247,11 +1260,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-#ifdef CONFIG_TCP_MD5SIG
-	if (tcp_v6_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
-
 	if (sk_filter(sk, skb))
 		goto discard;
 
@@ -1424,6 +1432,11 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
...