Commit 9349d600 authored by Petar Penkov, committed by Alexei Starovoitov

tcp: add skb-less helpers to retrieve SYN cookie

This patch allows generation of a SYN cookie before an SKB has been
allocated, as is the case at XDP.
Signed-off-by: Petar Penkov <ppenkov@google.com>
Reviewed-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 96511278
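
The new helpers take a listener socket and raw IP/TCP headers instead of an skb, so a caller that only has packet bytes (for example, a future XDP/BPF helper) can still derive a SYN cookie. Below is a minimal, hypothetical caller sketch; the function name example_issue_v4_syncookie, its return codes, and the assumption that socket lookup and header bounds checks already happened are illustrative, not part of this patch.

/* Hypothetical caller -- not part of this commit.  Assumes the listener
 * socket lookup and header length validation have already been done.
 */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

static int example_issue_v4_syncookie(struct sock *listener,
				      struct iphdr *iph, struct tcphdr *th,
				      __be32 *isn)
{
	u32 cookie;
	u16 mss;

	/* Returns 0 when SYN cookies are not required or not supported. */
	mss = tcp_v4_get_syncookie(listener, iph, th, &cookie);
	if (!mss)
		return -EOPNOTSUPP;

	/* The cookie becomes the initial sequence number of the SYN-ACK. */
	*isn = htonl(cookie);
	return 0;
}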
include/net/tcp.h
@@ -414,6 +414,16 @@ void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 * BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);

/*
 * TCP v4 functions exported for the inet6 API
 */
...
net/ipv4/tcp_input.c
@@ -3782,6 +3782,49 @@ static void smc_parse_options(const struct tcphdr *th,
#endif
}

/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
 * value on success.
 */
static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
{
	const unsigned char *ptr = (const unsigned char *)(th + 1);
	int length = (th->doff * 4) - sizeof(struct tcphdr);
	u16 mss = 0;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return mss;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return mss;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return mss;
			if (opsize > length)
				return mss;	/* fail on partial options */
			if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) {
				u16 in_mss = get_unaligned_be16(ptr);

				if (in_mss) {
					if (user_mss && user_mss < in_mss)
						in_mss = user_mss;
					mss = in_mss;
				}
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
	return mss;
}

/* Look for tcp options. Normally only called on SYN and SYNACK packets.
 * But, this can also be called on packets in the established flow when
 * the fast version below fails.
...
@@ -6464,6 +6507,36 @@ static void tcp_reqsk_record_syn(const struct sock *sk,
	}
}

/* If a SYN cookie is required and supported, returns a clamped MSS value to be
 * used for SYN cookie generation.
 */
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
	    !inet_csk_reqsk_queue_is_full(sk))
		return 0;

	if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
		return 0;

	if (sk_acceptq_is_full(sk)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		return 0;
	}

	mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss);
	if (!mss)
		mss = af_ops->mss_clamp;

	return mss;
}
EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss);

int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb)
...
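
To illustrate the clamping behaviour of tcp_parse_mss_option() above, here is a hypothetical in-file sketch; example_parse_mss and its WARN_ON checks are invented for illustration and would only build inside net/ipv4/tcp_input.c, since tcp_parse_mss_option() is static there.

/* Hypothetical example: a minimal SYN header advertising MSS 1460 and the
 * values tcp_parse_mss_option() is expected to return for it.
 */
static void example_parse_mss(void)
{
	struct {
		struct tcphdr th;
		u8 opts[4];
	} __packed syn = {
		.opts = { TCPOPT_MSS, TCPOLEN_MSS, 0x05, 0xb4 }, /* MSS 1460 */
	};

	syn.th.doff = (sizeof(syn.th) + sizeof(syn.opts)) / 4; /* 6 words */

	WARN_ON(tcp_parse_mss_option(&syn.th, 0) != 1460);	/* no user_mss: returned as-is */
	WARN_ON(tcp_parse_mss_option(&syn.th, 1200) != 1200);	/* clamped down to user_mss */
	WARN_ON(tcp_parse_mss_option(&syn.th, 9000) != 1460);	/* larger user_mss has no effect */
}

A SYN carrying no MSS option yields 0, and tcp_get_syncookie_mss() then falls back to af_ops->mss_clamp.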
net/ipv4/tcp_ipv4.c
@@ -1515,6 +1515,21 @@ static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
	return sk;
}

u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
...
net/ipv6/tcp_ipv6.c
@@ -1063,6 +1063,21 @@ static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
	return sk;
}

u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
...