Commit ba8e275a authored by Eric Dumazet, committed by David S. Miller

tcp: cleanup tcp_v[46]_inbound_md5_hash()

We'll soon have to call tcp_v[46]_inbound_md5_hash() twice.
Also add const attribute to the socket, as it might be the
unlocked listener for SYN packets.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b267cdd1
...@@ -1112,10 +1112,13 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, ...@@ -1112,10 +1112,13 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
} }
EXPORT_SYMBOL(tcp_v4_md5_hash_skb); EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */ /* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(struct sock *sk, static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
#ifdef CONFIG_TCP_MD5SIG
/* /*
* This gets called for each TCP segment that arrives * This gets called for each TCP segment that arrives
* so we want to be efficient. * so we want to be efficient.
...@@ -1165,8 +1168,9 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, ...@@ -1165,8 +1168,9 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk,
return true; return true;
} }
return false; return false;
}
#endif #endif
return false;
}
static void tcp_v4_init_req(struct request_sock *req, static void tcp_v4_init_req(struct request_sock *req,
const struct sock *sk_listener, const struct sock *sk_listener,
...@@ -1607,16 +1611,8 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1607,16 +1611,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse; goto discard_and_relse;
#ifdef CONFIG_TCP_MD5SIG
/*
* We really want to reject the packet as early as possible
* if:
* o We're expecting an MD5'd packet and this is no MD5 tcp option
* o There is an MD5 option and we're not expecting one
*/
if (tcp_v4_inbound_md5_hash(sk, skb)) if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard_and_relse; goto discard_and_relse;
#endif
nf_reset(skb); nf_reset(skb);
......
...@@ -622,8 +622,12 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, ...@@ -622,8 +622,12 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
return 1; return 1;
} }
static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) #endif
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
const struct sk_buff *skb)
{ {
#ifdef CONFIG_TCP_MD5SIG
const __u8 *hash_location = NULL; const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected; struct tcp_md5sig_key *hash_expected;
const struct ipv6hdr *ip6h = ipv6_hdr(skb); const struct ipv6hdr *ip6h = ipv6_hdr(skb);
...@@ -660,9 +664,9 @@ static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) ...@@ -660,9 +664,9 @@ static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
&ip6h->daddr, ntohs(th->dest)); &ip6h->daddr, ntohs(th->dest));
return true; return true;
} }
#endif
return false; return false;
} }
#endif
static void tcp_v6_init_req(struct request_sock *req, static void tcp_v6_init_req(struct request_sock *req,
const struct sock *sk_listener, const struct sock *sk_listener,
...@@ -1408,10 +1412,8 @@ static int tcp_v6_rcv(struct sk_buff *skb) ...@@ -1408,10 +1412,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
tcp_v6_fill_cb(skb, hdr, th); tcp_v6_fill_cb(skb, hdr, th);
#ifdef CONFIG_TCP_MD5SIG
if (tcp_v6_inbound_md5_hash(sk, skb)) if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse; goto discard_and_relse;
#endif
if (sk_filter(sk, skb)) if (sk_filter(sk, skb))
goto discard_and_relse; goto discard_and_relse;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment