Commit 493007c1 authored by Jakub Kicinski

Merge branch 'gtp'

Jonas Bonn says:

====================
There's ongoing work in this driver to provide support for IPv6, GRO,
GSO, and "collect metadata" mode operation.  In order to facilitate this
work going forward, this short series accumulates already ACKed patches
that are ready for the next merge window.

All of these patches should be uncontroversial at this point, including
the first one in the series that reverts a recently added change to
introduce "collect metadata" mode.  As that patch produces 'broken'
packets when common GTP headers are in place, it seems better to revert
it and rethink things a bit before inclusion.
====================

Link: https://lore.kernel.org/r/20210203070805.281321-1-jonas@norrbonn.se
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3dd344ea 9716178a
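
For background: GTPv1-U prepends an 8-byte mandatory header to every tunneled
packet, and whenever any of the E/S/PN flag bits is set another 4 bytes
(sequence number, N-PDU number, next-extension-header type) are present on the
wire. These optional fields are presumably the "common GTP headers" that the
reverted patch mishandled. A rough userspace sketch of that layout follows;
the struct mirrors the driver's wire format, but the names are illustrative,
not a kernel API:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* GTPv1-U mandatory header (8 bytes on the wire). */
    struct gtp1_hdr {
            uint8_t  flags;   /* bits 7-5: version, bit 4: PT, bits 2-0: E/S/PN */
            uint8_t  type;    /* 0xff: G-PDU, i.e. an encapsulated user packet */
            uint16_t length;  /* payload length, network byte order */
            uint32_t tid;     /* tunnel endpoint identifier (TEID) */
    } __attribute__((packed));

    #define GTP1_F_MASK 0x07  /* E, S and PN flag bits */

    /* Bytes to strip (after UDP) before the inner IP packet starts. */
    static size_t gtp1_hdr_len(const struct gtp1_hdr *h)
    {
            assert((h->flags >> 5) == 1);  /* GTP version 1 */
            /* Per 3GPP TS 29.060, if any one of E/S/PN is set, all
             * three optional fields (4 bytes total) are present.
             */
            return sizeof(*h) + ((h->flags & GTP1_F_MASK) ? 4 : 0);
    }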
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,7 +21,6 @@
 #include <linux/file.h>
 #include <linux/gtp.h>
-#include <net/dst_metadata.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
@@ -74,9 +73,6 @@ struct gtp_dev {
        unsigned int hash_size;
        struct hlist_head *tid_hash;
        struct hlist_head *addr_hash;
-       /* Used by LWT tunnel. */
-       bool collect_md;
-       struct socket *collect_md_sock;
 };
 
 static unsigned int gtp_net_id __read_mostly;
@@ -183,120 +179,38 @@ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
        return false;
 }
 
-static int gtp_set_tun_dst(struct gtp_dev *gtp, struct sk_buff *skb,
-                          unsigned int hdrlen, u8 gtp_version,
-                          __be64 tid, u8 flags)
-{
-       struct metadata_dst *tun_dst;
-       int opts_len = 0;
-
-       if (unlikely(flags & GTP1_F_MASK))
-               opts_len = sizeof(struct gtpu_metadata);
-
-       tun_dst = udp_tun_rx_dst(skb, gtp->sk1u->sk_family, TUNNEL_KEY, tid, opts_len);
-       if (!tun_dst) {
-               netdev_dbg(gtp->dev, "Failed to allocate tun_dst");
-               goto err;
-       }
-
-       netdev_dbg(gtp->dev, "attaching metadata_dst to skb, gtp ver %d hdrlen %d\n",
-                  gtp_version, hdrlen);
-       if (unlikely(opts_len)) {
-               struct gtpu_metadata *opts;
-               struct gtp1_header *gtp1;
-
-               opts = ip_tunnel_info_opts(&tun_dst->u.tun_info);
-               gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
-               opts->ver = GTP_METADATA_V1;
-               opts->flags = gtp1->flags;
-               opts->type = gtp1->type;
-               netdev_dbg(gtp->dev, "recved control pkt: flag %x type: %d\n",
-                          opts->flags, opts->type);
-               tun_dst->u.tun_info.key.tun_flags |= TUNNEL_GTPU_OPT;
-               tun_dst->u.tun_info.options_len = opts_len;
-               skb->protocol = htons(0xffff);         /* Unknown */
-       }
-
-       /* Get rid of the GTP + UDP headers. */
-       if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
-                                !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)))) {
-               gtp->dev->stats.rx_length_errors++;
-               goto err;
-       }
-
-       skb_dst_set(skb, &tun_dst->dst);
-       return 0;
-err:
-       return -1;
-}
-
-static int gtp_rx(struct gtp_dev *gtp, struct sk_buff *skb,
-                 unsigned int hdrlen, u8 gtp_version, unsigned int role,
-                 __be64 tid, u8 flags, u8 type)
-{
-       if (ip_tunnel_collect_metadata() || gtp->collect_md) {
-               int err;
-
-               err = gtp_set_tun_dst(gtp, skb, hdrlen, gtp_version, tid, flags);
-               if (err)
-                       goto err;
-       } else {
-               struct pdp_ctx *pctx;
-
-               if (flags & GTP1_F_MASK)
-                       hdrlen += 4;
-
-               if (type != GTP_TPDU)
-                       return 1;
-
-               if (gtp_version == GTP_V0)
-                       pctx = gtp0_pdp_find(gtp, be64_to_cpu(tid));
-               else
-                       pctx = gtp1_pdp_find(gtp, be64_to_cpu(tid));
-               if (!pctx) {
-                       netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
-                       return 1;
-               }
-
-               if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
-                       netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
-                       return 1;
-               }
-
-               /* Get rid of the GTP + UDP headers. */
-               if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
-                                        !net_eq(sock_net(pctx->sk), dev_net(gtp->dev)))) {
-                       gtp->dev->stats.rx_length_errors++;
-                       goto err;
-               }
-       }
-       netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
-
-       /* Now that the UDP and the GTP header have been removed, set up the
-        * new network header. This is required by the upper layer to
-        * calculate the transport header.
-        */
-       skb_reset_network_header(skb);
-       if (pskb_may_pull(skb, sizeof(struct iphdr))) {
-               struct iphdr *iph;
-
-               iph = ip_hdr(skb);
-               if (iph->version == 4) {
-                       netdev_dbg(gtp->dev, "inner pkt: ipv4");
-                       skb->protocol = htons(ETH_P_IP);
-               } else if (iph->version == 6) {
-                       netdev_dbg(gtp->dev, "inner pkt: ipv6");
-                       skb->protocol = htons(ETH_P_IPV6);
-               } else {
-                       netdev_dbg(gtp->dev, "inner pkt error: Unknown type");
-               }
-       }
-
-       skb->dev = gtp->dev;
-       dev_sw_netstats_rx_add(gtp->dev, skb->len);
+static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
+                 unsigned int hdrlen, unsigned int role)
+{
+       if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+               netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+               return 1;
+       }
+
+       /* Get rid of the GTP + UDP headers. */
+       if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+                                !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
+               pctx->dev->stats.rx_length_errors++;
+               goto err;
+       }
+
+       netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+
+       /* Now that the UDP and the GTP header have been removed, set up the
+        * new network header. This is required by the upper layer to
+        * calculate the transport header.
+        */
+       skb_reset_network_header(skb);
+
+       skb->dev = pctx->dev;
+
+       dev_sw_netstats_rx_add(pctx->dev, skb->len);
+
        netif_rx(skb);
        return 0;
 
 err:
-       gtp->dev->stats.rx_dropped++;
+       pctx->dev->stats.rx_dropped++;
        return -1;
 }
@@ -306,6 +220,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp0_header);
        struct gtp0_header *gtp0;
+       struct pdp_ctx *pctx;
 
        if (!pskb_may_pull(skb, hdrlen))
                return -1;
@@ -315,7 +230,16 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        if ((gtp0->flags >> 5) != GTP_V0)
                return 1;
 
-       return gtp_rx(gtp, skb, hdrlen, GTP_V0, gtp->role, gtp0->tid, gtp0->flags, gtp0->type);
+       if (gtp0->type != GTP_TPDU)
+               return 1;
+
+       pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+       if (!pctx) {
+               netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+               return 1;
+       }
+
+       return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
@@ -323,30 +247,41 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
        unsigned int hdrlen = sizeof(struct udphdr) +
                              sizeof(struct gtp1_header);
        struct gtp1_header *gtp1;
+       struct pdp_ctx *pctx;
 
        if (!pskb_may_pull(skb, hdrlen))
                return -1;
 
        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
 
-       netdev_dbg(gtp->dev, "GTPv1 recv: flags %x\n", gtp1->flags);
        if ((gtp1->flags >> 5) != GTP_V1)
                return 1;
 
+       if (gtp1->type != GTP_TPDU)
+               return 1;
+
        /* From 29.060: "This field shall be present if and only if any one or
         * more of the S, PN and E flags are set.".
         *
         * If any of the bit is set, then the remaining ones also have to be
         * set.
         */
+       if (gtp1->flags & GTP1_F_MASK)
+               hdrlen += 4;
+
        /* Make sure the header is larger enough, including extensions. */
        if (!pskb_may_pull(skb, hdrlen))
                return -1;
 
        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
 
-       return gtp_rx(gtp, skb, hdrlen, GTP_V1, gtp->role,
-                     key32_to_tunnel_id(gtp1->tid), gtp1->flags, gtp1->type);
+       pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+       if (!pctx) {
+               netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+               return 1;
+       }
+
+       return gtp_rx(pctx, skb, hdrlen, gtp->role);
 }
 
 static void __gtp_encap_destroy(struct sock *sk)
@@ -386,11 +321,6 @@ static void gtp_encap_disable(struct gtp_dev *gtp)
 {
        gtp_encap_disable_sock(gtp->sk0);
        gtp_encap_disable_sock(gtp->sk1u);
-       if (gtp->collect_md_sock) {
-               udp_tunnel_sock_release(gtp->collect_md_sock);
-               gtp->collect_md_sock = NULL;
-               netdev_dbg(gtp->dev, "GTP socket released.\n");
-       }
 }
 
 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -405,8 +335,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
        if (!gtp)
                return 1;
 
-       netdev_dbg(gtp->dev, "encap_recv sk=%p type %d\n",
-                  sk, udp_sk(sk)->encap_type);
+       netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
        switch (udp_sk(sk)->encap_type) {
        case UDP_ENCAP_GTP0:
@@ -460,13 +389,12 @@ static void gtp_dev_uninit(struct net_device *dev)
 
 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
                                           const struct sock *sk,
-                                          __be32 daddr,
-                                          __be32 saddr)
+                                          __be32 daddr)
 {
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = sk->sk_bound_dev_if;
        fl4->daddr = daddr;
-       fl4->saddr = saddr;
+       fl4->saddr = inet_sk(sk)->inet_saddr;
        fl4->flowi4_tos = RT_CONN_FLAGS(sk);
        fl4->flowi4_proto = sk->sk_protocol;
@@ -490,7 +418,7 @@ static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
        gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
 }
 
-static inline void gtp1_push_header(struct sk_buff *skb, __be32 tid)
+static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
 {
        int payload_len = skb->len;
        struct gtp1_header *gtp1;
@@ -506,63 +434,46 @@ static inline void gtp1_push_header(struct sk_buff *skb, __be32 tid)
        gtp1->flags = 0x30; /* v1, GTP-non-prime. */
        gtp1->type = GTP_TPDU;
        gtp1->length = htons(payload_len);
-       gtp1->tid = tid;
+       gtp1->tid = htonl(pctx->u.v1.o_tei);
 
        /* TODO: Suppport for extension header, sequence number and N-PDU.
        *        Update the length field if any of them is available.
        */
 }
 
-static inline int gtp1_push_control_header(struct sk_buff *skb,
-                                          __be32 tid,
-                                          struct gtpu_metadata *opts,
-                                          struct net_device *dev)
-{
-       struct gtp1_header *gtp1c;
-       int payload_len;
-
-       if (opts->ver != GTP_METADATA_V1)
-               return -ENOENT;
-
-       if (opts->type == 0xFE) {
-               /* for end marker ignore skb data. */
-               netdev_dbg(dev, "xmit pkt with null data");
-               pskb_trim(skb, 0);
-       }
-       if (skb_cow_head(skb, sizeof(*gtp1c)) < 0)
-               return -ENOMEM;
-
-       payload_len = skb->len;
-
-       gtp1c = skb_push(skb, sizeof(*gtp1c));
-
-       gtp1c->flags = opts->flags;
-       gtp1c->type = opts->type;
-       gtp1c->length = htons(payload_len);
-       gtp1c->tid = tid;
-       netdev_dbg(dev, "xmit control pkt: ver %d flags %x type %x pkt len %d tid %x",
-                  opts->ver, opts->flags, opts->type, skb->len, tid);
-       return 0;
-}
-
 struct gtp_pktinfo {
        struct sock *sk;
-       __u8 tos;
+       struct iphdr *iph;
        struct flowi4 fl4;
        struct rtable *rt;
+       struct pdp_ctx *pctx;
        struct net_device *dev;
        __be16 gtph_port;
 };
 
+static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+{
+       switch (pktinfo->pctx->gtp_version) {
+       case GTP_V0:
+               pktinfo->gtph_port = htons(GTP0_PORT);
+               gtp0_push_header(skb, pktinfo->pctx);
+               break;
+       case GTP_V1:
+               pktinfo->gtph_port = htons(GTP1U_PORT);
+               gtp1_push_header(skb, pktinfo->pctx);
+               break;
+       }
+}
+
 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
-                                       struct sock *sk,
-                                       __u8 tos,
-                                       struct rtable *rt,
+                                       struct sock *sk, struct iphdr *iph,
+                                       struct pdp_ctx *pctx, struct rtable *rt,
                                        struct flowi4 *fl4,
                                        struct net_device *dev)
 {
        pktinfo->sk = sk;
-       pktinfo->tos = tos;
+       pktinfo->iph = iph;
+       pktinfo->pctx = pctx;
        pktinfo->rt = rt;
        pktinfo->fl4 = *fl4;
        pktinfo->dev = dev;
@@ -572,110 +483,50 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
                             struct gtp_pktinfo *pktinfo)
 {
        struct gtp_dev *gtp = netdev_priv(dev);
-       struct gtpu_metadata *opts = NULL;
-       struct sock *sk = NULL;
        struct pdp_ctx *pctx;
        struct rtable *rt;
        struct flowi4 fl4;
-       u8 gtp_version;
-       __be16 df = 0;
-       __be32 tun_id;
-       __be32 daddr;
-       __be32 saddr;
-       __u8 tos;
+       struct iphdr *iph;
+       __be16 df;
        int mtu;
 
-       if (gtp->collect_md) {
-               /* LWT GTP1U encap */
-               struct ip_tunnel_info *info = NULL;
-
-               info = skb_tunnel_info(skb);
-               if (!info) {
-                       netdev_dbg(dev, "missing tunnel info");
-                       return -ENOENT;
-               }
-               if (info->key.tp_dst && ntohs(info->key.tp_dst) != GTP1U_PORT) {
-                       netdev_dbg(dev, "unexpected GTP dst port: %d", ntohs(info->key.tp_dst));
-                       return -EOPNOTSUPP;
-               }
-               pctx = NULL;
-               gtp_version = GTP_V1;
-               tun_id = tunnel_id_to_key32(info->key.tun_id);
-               daddr = info->key.u.ipv4.dst;
-               saddr = info->key.u.ipv4.src;
-               sk = gtp->sk1u;
-               if (!sk) {
-                       netdev_dbg(dev, "missing tunnel sock");
-                       return -EOPNOTSUPP;
-               }
-               tos = info->key.tos;
-               if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
-                       df = htons(IP_DF);
-
-               if (info->options_len != 0) {
-                       if (info->key.tun_flags & TUNNEL_GTPU_OPT) {
-                               opts = ip_tunnel_info_opts(info);
-                       } else {
-                               netdev_dbg(dev, "missing tunnel metadata for control pkt");
-                               return -EOPNOTSUPP;
-                       }
-               }
-               netdev_dbg(dev, "flow-based GTP1U encap: tunnel id %d\n",
-                          be32_to_cpu(tun_id));
-       } else {
-               struct iphdr *iph;
-
-               if (ntohs(skb->protocol) != ETH_P_IP)
-                       return -EOPNOTSUPP;
-
-               iph = ip_hdr(skb);
-
-               /* Read the IP destination address and resolve the PDP context.
-                * Prepend PDP header with TEI/TID from PDP ctx.
-                */
-               if (gtp->role == GTP_ROLE_SGSN)
-                       pctx = ipv4_pdp_find(gtp, iph->saddr);
-               else
-                       pctx = ipv4_pdp_find(gtp, iph->daddr);
-               if (!pctx) {
-                       netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
-                                  &iph->daddr);
-                       return -ENOENT;
-               }
-               sk = pctx->sk;
-               netdev_dbg(dev, "found PDP context %p\n", pctx);
-
-               gtp_version = pctx->gtp_version;
-               tun_id = htonl(pctx->u.v1.o_tei);
-               daddr = pctx->peer_addr_ip4.s_addr;
-               saddr = inet_sk(sk)->inet_saddr;
-               tos = iph->tos;
-               df = iph->frag_off;
-               netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
-                          &iph->saddr, &iph->daddr);
-       }
+       /* Read the IP destination address and resolve the PDP context.
+        * Prepend PDP header with TEI/TID from PDP ctx.
+        */
+       iph = ip_hdr(skb);
+       if (gtp->role == GTP_ROLE_SGSN)
+               pctx = ipv4_pdp_find(gtp, iph->saddr);
+       else
+               pctx = ipv4_pdp_find(gtp, iph->daddr);
+
+       if (!pctx) {
+               netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+                          &iph->daddr);
+               return -ENOENT;
+       }
+       netdev_dbg(dev, "found PDP context %p\n", pctx);
 
-       rt = ip4_route_output_gtp(&fl4, sk, daddr, saddr);
+       rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
        if (IS_ERR(rt)) {
-               netdev_dbg(dev, "no route to SSGN %pI4\n", &daddr);
+               netdev_dbg(dev, "no route to SSGN %pI4\n",
+                          &pctx->peer_addr_ip4.s_addr);
                dev->stats.tx_carrier_errors++;
                goto err;
        }
 
        if (rt->dst.dev == dev) {
-               netdev_dbg(dev, "circular route to SSGN %pI4\n", &daddr);
+               netdev_dbg(dev, "circular route to SSGN %pI4\n",
+                          &pctx->peer_addr_ip4.s_addr);
                dev->stats.collisions++;
                goto err_rt;
        }
 
-       skb_dst_drop(skb);
-
        /* This is similar to tnl_update_pmtu(). */
+       df = iph->frag_off;
        if (df) {
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
                        sizeof(struct iphdr) - sizeof(struct udphdr);
-               switch (gtp_version) {
+               switch (pctx->gtp_version) {
                case GTP_V0:
                        mtu -= sizeof(struct gtp0_header);
                        break;
@@ -689,38 +540,17 @@
 
        rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
 
-       if (!skb_is_gso(skb) && (df & htons(IP_DF)) && mtu < skb->len) {
-               netdev_dbg(dev, "packet too big, fragmentation needed");
+       if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+           mtu < ntohs(iph->tot_len)) {
+               netdev_dbg(dev, "packet too big, fragmentation needed\n");
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                              htonl(mtu));
                goto err_rt;
        }
 
-       gtp_set_pktinfo_ipv4(pktinfo, sk, tos, rt, &fl4, dev);
-
-       if (unlikely(opts)) {
-               int err;
-
-               pktinfo->gtph_port = htons(GTP1U_PORT);
-               err = gtp1_push_control_header(skb, tun_id, opts, dev);
-               if (err) {
-                       netdev_info(dev, "cntr pkt error %d", err);
-                       goto err_rt;
-               }
-               return 0;
-       }
-
-       switch (gtp_version) {
-       case GTP_V0:
-               pktinfo->gtph_port = htons(GTP0_PORT);
-               gtp0_push_header(skb, pctx);
-               break;
-       case GTP_V1:
-               pktinfo->gtph_port = htons(GTP1U_PORT);
-               gtp1_push_header(skb, tun_id);
-               break;
-       }
+       gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
+       gtp_push_header(skb, pktinfo);
 
        return 0;
 
 err_rt:
@@ -731,6 +561,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
 
 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       unsigned int proto = ntohs(skb->protocol);
        struct gtp_pktinfo pktinfo;
        int err;
@@ -742,22 +573,34 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
        rcu_read_lock();
-       err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+       switch (proto) {
+       case ETH_P_IP:
+               err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
        rcu_read_unlock();
 
        if (err < 0)
                goto tx_err;
 
-       udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
-                           pktinfo.fl4.saddr,
-                           pktinfo.fl4.daddr,
-                           pktinfo.tos,
-                           ip4_dst_hoplimit(&pktinfo.rt->dst),
-                           0,
-                           pktinfo.gtph_port,
-                           pktinfo.gtph_port,
-                           true,
-                           false);
+       switch (proto) {
+       case ETH_P_IP:
+               netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+                          &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+               udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+                                   pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+                                   pktinfo.iph->tos,
+                                   ip4_dst_hoplimit(&pktinfo.rt->dst),
+                                   0,
+                                   pktinfo.gtph_port, pktinfo.gtph_port,
+                                   !net_eq(sock_net(pktinfo.pctx->sk),
+                                           dev_net(dev)),
+                                   false);
+               break;
+       }
 
        return NETDEV_TX_OK;
 tx_err:
@@ -773,26 +616,23 @@ static const struct net_device_ops gtp_netdev_ops = {
        .ndo_get_stats64 = dev_get_tstats64,
 };
 
-static struct gtp_dev *gtp_find_flow_based_dev(struct net *net)
-{
-       struct gtp_net *gn = net_generic(net, gtp_net_id);
-       struct gtp_dev *gtp;
-
-       list_for_each_entry(gtp, &gn->gtp_dev_list, list) {
-               if (gtp->collect_md)
-                       return gtp;
-       }
-
-       return NULL;
-}
+static const struct device_type gtp_type = {
+       .name = "gtp",
+};
 
 static void gtp_link_setup(struct net_device *dev)
 {
+       unsigned int max_gtp_header_len = sizeof(struct iphdr) +
+                                         sizeof(struct udphdr) +
+                                         sizeof(struct gtp0_header);
+
        dev->netdev_ops = &gtp_netdev_ops;
        dev->needs_free_netdev = true;
+       SET_NETDEV_DEVTYPE(dev, &gtp_type);
 
        dev->hard_header_len = 0;
        dev->addr_len = 0;
+       dev->mtu = ETH_DATA_LEN - max_gtp_header_len;
 
        /* Zero header length. */
        dev->type = ARPHRD_NONE;
@@ -802,15 +642,11 @@ static void gtp_link_setup(struct net_device *dev)
        dev->features |= NETIF_F_LLTX;
        netif_keep_dst(dev);
 
-       /* Assume largest header, ie. GTPv0. */
-       dev->needed_headroom = LL_MAX_HEADER +
-                              sizeof(struct iphdr) +
-                              sizeof(struct udphdr) +
-                              sizeof(struct gtp0_header);
+       dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
 }
 
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[]);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
 
 static void gtp_destructor(struct net_device *dev)
 {
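
Worked out, the defaults above come to sizeof(struct iphdr) +
sizeof(struct udphdr) + sizeof(struct gtp0_header) = 20 + 8 + 20 = 48 bytes of
worst-case encapsulation overhead (GTPv0 carries the largest header; the
GTPv1-U mandatory header is only 8 bytes), so the device MTU defaults to
ETH_DATA_LEN - 48 = 1452. A small userspace check of that arithmetic, using an
illustrative mirror of the GTPv0 header struct, follows:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative mirror of the driver's struct gtp0_header (20 bytes). */
    struct gtp0_hdr {
            uint8_t  flags;
            uint8_t  type;
            uint16_t length;
            uint16_t seq;
            uint16_t flow;
            uint8_t  number;
            uint8_t  spare[3];
            uint64_t tid;
    } __attribute__((packed));

    int main(void)
    {
            unsigned int iphdr_len = 20, udphdr_len = 8; /* IPv4 w/o options + UDP */
            unsigned int max_gtp_header_len = iphdr_len + udphdr_len +
                                              sizeof(struct gtp0_hdr);

            assert(sizeof(struct gtp0_hdr) == 20);
            assert(max_gtp_header_len == 48);
            assert(1500 - max_gtp_header_len == 1452); /* default dev->mtu */
            return 0;
    }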
@@ -828,24 +664,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        struct gtp_net *gn;
        int hashsize, err;
 
-       if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1] &&
-           !data[IFLA_GTP_COLLECT_METADATA])
+       if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
                return -EINVAL;
 
        gtp = netdev_priv(dev);
 
-       if (data[IFLA_GTP_COLLECT_METADATA]) {
-               if (data[IFLA_GTP_FD0]) {
-                       netdev_dbg(dev, "LWT device does not support setting v0 socket");
-                       return -EINVAL;
-               }
-
-               if (gtp_find_flow_based_dev(src_net)) {
-                       netdev_dbg(dev, "LWT device already exist");
-                       return -EBUSY;
-               }
-
-               gtp->collect_md = true;
-       }
-
        if (!data[IFLA_GTP_PDP_HASHSIZE]) {
                hashsize = 1024;
        } else {
@@ -858,7 +681,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
-       err = gtp_encap_enable(gtp, dev, data);
+       err = gtp_encap_enable(gtp, data);
        if (err < 0)
                goto out_hashtable;
@@ -872,7 +695,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        list_add_rcu(&gtp->list, &gn->gtp_dev_list);
        dev->priv_destructor = gtp_destructor;
 
-       netdev_dbg(dev, "registered new GTP interface %s\n", dev->name);
+       netdev_dbg(dev, "registered new GTP interface\n");
 
        return 0;
@@ -903,7 +726,6 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
        [IFLA_GTP_FD1] = { .type = NLA_U32 },
        [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
        [IFLA_GTP_ROLE] = { .type = NLA_U32 },
-       [IFLA_GTP_COLLECT_METADATA] = { .type = NLA_FLAG },
 };
 
 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -917,7 +739,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
 
 static size_t gtp_get_size(const struct net_device *dev)
 {
-       return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
+       return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
+               nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
 }
 
 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -926,8 +749,7 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
                goto nla_put_failure;
-
-       if (gtp->collect_md && nla_put_flag(skb, IFLA_GTP_COLLECT_METADATA))
+       if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
                goto nla_put_failure;
 
        return 0;
@@ -975,24 +797,35 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
        return -ENOMEM;
 }
 
-static int __gtp_encap_enable_socket(struct socket *sock, int type,
-                                    struct gtp_dev *gtp)
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+                                           struct gtp_dev *gtp)
 {
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
+       struct socket *sock;
        struct sock *sk;
+       int err;
+
+       pr_debug("enable gtp on %d, %d\n", fd, type);
+
+       sock = sockfd_lookup(fd, &err);
+       if (!sock) {
+               pr_debug("gtp socket fd=%d not found\n", fd);
+               return NULL;
+       }
 
        sk = sock->sk;
        if (sk->sk_protocol != IPPROTO_UDP ||
            sk->sk_type != SOCK_DGRAM ||
            (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
-               pr_debug("socket not UDP\n");
-               return -EINVAL;
+               pr_debug("socket fd=%d not UDP\n", fd);
+               sk = ERR_PTR(-EINVAL);
+               goto out_sock;
        }
 
        lock_sock(sk);
        if (sk->sk_user_data) {
-               release_sock(sock->sk);
-               return -EBUSY;
+               sk = ERR_PTR(-EBUSY);
+               goto out_rel_sock;
        }
 
        sock_hold(sk);
@@ -1003,58 +836,15 @@ static int __gtp_encap_enable_socket(struct socket *sock, int type,
        tuncfg.encap_destroy = gtp_encap_destroy;
 
        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
-       release_sock(sock->sk);
-       return 0;
-}
-
-static struct sock *gtp_encap_enable_socket(int fd, int type,
-                                           struct gtp_dev *gtp)
-{
-       struct socket *sock;
-       int err;
-
-       pr_debug("enable gtp on %d, %d\n", fd, type);
-
-       sock = sockfd_lookup(fd, &err);
-       if (!sock) {
-               pr_debug("gtp socket fd=%d not found\n", fd);
-               return NULL;
-       }
-       err = __gtp_encap_enable_socket(sock, type, gtp);
+out_rel_sock:
+       release_sock(sock->sk);
+out_sock:
        sockfd_put(sock);
-       if (err)
-               return ERR_PTR(err);
 
-       return sock->sk;
+       return sk;
 }
 
-static struct socket *gtp_create_gtp_socket(struct gtp_dev *gtp, struct net_device *dev)
-{
-       struct udp_port_cfg udp_conf;
-       struct socket *sock;
-       int err;
-
-       memset(&udp_conf, 0, sizeof(udp_conf));
-       udp_conf.family = AF_INET;
-       udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
-       udp_conf.local_udp_port = htons(GTP1U_PORT);
-
-       err = udp_sock_create(dev_net(dev), &udp_conf, &sock);
-       if (err < 0) {
-               pr_debug("create gtp sock failed: %d\n", err);
-               return ERR_PTR(err);
-       }
-       err = __gtp_encap_enable_socket(sock, UDP_ENCAP_GTP1U, gtp);
-       if (err) {
-               pr_debug("enable gtp sock encap failed: %d\n", err);
-               udp_tunnel_sock_release(sock);
-               return ERR_PTR(err);
-       }
-       pr_debug("create gtp sock done\n");
-       return sock;
-}
-
-static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[])
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
 {
        struct sock *sk1u = NULL;
        struct sock *sk0 = NULL;
@@ -1078,25 +868,11 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct net_device *dev, struct nlattr *data[])
                }
        }
 
-       if (data[IFLA_GTP_COLLECT_METADATA]) {
-               struct socket *sock;
-
-               if (!sk1u) {
-                       sock = gtp_create_gtp_socket(gtp, dev);
-                       if (IS_ERR(sock))
-                               return PTR_ERR(sock);
-
-                       gtp->collect_md_sock = sock;
-                       sk1u = sock->sk;
-               } else {
-                       gtp->collect_md_sock = NULL;
-               }
-       }
-
        if (data[IFLA_GTP_ROLE]) {
                role = nla_get_u32(data[IFLA_GTP_ROLE]);
                if (role > GTP_ROLE_SGSN) {
-                       gtp_encap_disable(gtp);
+                       gtp_encap_disable_sock(sk0);
+                       gtp_encap_disable_sock(sk1u);
                        return -EINVAL;
                }
        }
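
With the on-demand metadata socket gone, a GTP device once again only
encapsulates over UDP sockets that userspace creates, binds and hands over by
file descriptor: IFLA_GTP_FD0 for GTPv0 (port 3386) and/or IFLA_GTP_FD1 for
GTPv1-U (port 2152). A hedged sketch of the userspace side is below; the
RTM_NEWLINK plumbing that actually passes the fd (typically done with
libgtpnl) is left out:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #define GTP1U_PORT 2152 /* well-known GTPv1-U UDP port */

    /* Create and bind the socket whose fd is passed as IFLA_GTP_FD1. */
    static int gtp1u_sock_create(void)
    {
            struct sockaddr_in addr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_addr.s_addr = htonl(INADDR_ANY);
            addr.sin_port = htons(GTP1U_PORT);

            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            /* The fd then goes into the IFLA_GTP_FD1 attribute of the
             * RTM_NEWLINK request that creates the gtp device.
             */
            return fd;
    }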
@@ -1655,7 +1431,7 @@
        if (err < 0)
                goto unreg_genl_family;
 
-       pr_info("GTP module loaded (pdp ctx size %zd bytes) with tnl-md support\n",
+       pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
                sizeof(struct pdp_ctx));
 
        return 0;
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -2,8 +2,6 @@
 #ifndef _UAPI_LINUX_GTP_H_
 #define _UAPI_LINUX_GTP_H_
 
-#include <linux/types.h>
-
 #define GTP_GENL_MCGRP_NAME "gtp"
 
 enum gtp_genl_cmds {
@@ -36,14 +34,4 @@ enum gtp_attrs {
 };
 #define GTPA_MAX (__GTPA_MAX + 1)
 
-enum {
-       GTP_METADATA_V1
-};
-
-struct gtpu_metadata {
-       __u8 ver;
-       __u8 flags;
-       __u8 type;
-};
-
 #endif /* _UAPI_LINUX_GTP_H_ */
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -811,7 +811,6 @@ enum {
        IFLA_GTP_FD1,
        IFLA_GTP_PDP_HASHSIZE,
        IFLA_GTP_ROLE,
-       IFLA_GTP_COLLECT_METADATA,
        __IFLA_GTP_MAX,
 };
 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -176,7 +176,6 @@
 #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
 #define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
 #define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
-#define TUNNEL_GTPU_OPT __cpu_to_be16(0x8000)
 
 #define TUNNEL_OPTIONS_PRESENT \
                (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -617,7 +617,6 @@ enum {
        IFLA_GTP_FD1,
        IFLA_GTP_PDP_HASHSIZE,
        IFLA_GTP_ROLE,
-       IFLA_GTP_COLLECT_METADATA,
        __IFLA_GTP_MAX,
 };
 #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)