Commit ba35855e authored by David S. Miller

Merge branch 'gro-in-udp'

Tom Herbert says:

====================
udp: GRO in UDP sockets

This patch set adds GRO functions (gro_receive and gro_complete) to UDP
sockets and removes the udp_offload infrastructure.

Add GRO functions (gro_receive and gro_complete) to UDP sockets. In
udp_gro_receive and udp_gro_complete a socket lookup is done instead of
looking up the port number in udp_offloads. If a socket is found and it
has GRO functions set, those are called. This allows binding GRO
functions to more than just a port number. Eventually, we will be able
to use this technique to support application-defined GRO for an
application protocol by attaching BPF programs to UDP sockets.
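
As a minimal sketch (not part of this series), this is roughly what an
encapsulation driver looks like after the change: GRO handlers are bound
to the socket through udp_tunnel_sock_cfg instead of udp_add_offload().
The my_tunnel_* names and the callback bodies are hypothetical
placeholders; the cfg fields and setup_udp_tunnel_sock() usage mirror
the geneve/vxlan/fou updates in the diff below.

#include <linux/skbuff.h>
#include <net/udp_tunnel.h>

static struct sk_buff **my_tunnel_gro_receive(struct sock *sk,
					      struct sk_buff **head,
					      struct sk_buff *skb)
{
	/* Stub: a real driver parses its encapsulation header here and
	 * decides whether skb can be merged with an entry on 'head'.
	 */
	return NULL;
}

static int my_tunnel_gro_complete(struct sock *sk, struct sk_buff *skb,
				  int nhoff)
{
	/* Stub: finish the aggregated packet; inner headers start at nhoff. */
	return 0;
}

static int my_tunnel_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder decapsulation path */
	return 0;
}

static void my_tunnel_setup_sock(struct net *net, struct socket *sock,
				 void *priv)
{
	struct udp_tunnel_sock_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.sk_user_data = priv;
	cfg.encap_type = 1;
	cfg.encap_rcv = my_tunnel_encap_recv;
	/* Per-socket GRO callbacks; no udp_offload registration needed. */
	cfg.gro_receive = my_tunnel_gro_receive;
	cfg.gro_complete = my_tunnel_gro_complete;
	setup_udp_tunnel_sock(net, sock, &cfg);
}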

In order to implement these functions, we add exported
udp6_lib_lookup_skb and udp4_lib_lookup_skb functions in ipv4/udp.c and
ipv6/udp.c. Also, inet_iif and references to skb_dst() were changed to
check that the dst is set in the skb before dereferencing it. In the GRO
path the UDP socket lookup is now performed before the dst is set; to
get the device in that case we simply use skb->dev.
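
For reference, the device selection described above boils down to the
following check. gro_lookup_dev is a hypothetical name used only for
illustration; the real code lives in udp4_lib_lookup_skb() and
udp6_lib_lookup_skb() in the diff below.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Before a route lookup has attached a dst (as in the GRO path), fall
 * back to the device the packet arrived on.
 */
static const struct net_device *gro_lookup_dev(const struct sk_buff *skb)
{
	return skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
}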

Tested:

Ran various combinations of VXLAN and GUE TCP_STREAM and TCP_RR tests.
Did not see any material regression.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1fbbe1a8 46aa2f30
......@@ -87,7 +87,6 @@ struct geneve_sock {
struct socket *sock;
struct rcu_head rcu;
int refcnt;
struct udp_offload udp_offloads;
struct hlist_head vni_list[VNI_HASH_SIZE];
u32 flags;
};
......@@ -409,14 +408,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
struct net *net = sock_net(sk);
sa_family_t sa_family = geneve_get_sk_family(gs);
__be16 port = inet_sk(sk)->inet_sport;
int err;
if (sa_family == AF_INET) {
err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
if (err)
pr_warn("geneve: udp_add_offload failed with status %d\n",
err);
}
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
......@@ -432,9 +423,9 @@ static int geneve_hlen(struct genevehdr *gh)
return sizeof(*gh) + gh->opt_len * 4;
}
static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
struct sk_buff *skb,
struct udp_offload *uoff)
static struct sk_buff **geneve_gro_receive(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct genevehdr *gh, *gh2;
......@@ -495,8 +486,8 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
return pp;
}
static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
struct udp_offload *uoff)
static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
struct genevehdr *gh;
struct packet_offload *ptype;
......@@ -545,14 +536,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */
gs->udp_offloads.port = port;
gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
geneve_notify_add_rx_port(gs);
/* Mark socket as an encapsulation socket */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = gs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.gro_receive = geneve_gro_receive;
tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
......@@ -576,9 +567,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
}
rcu_read_unlock();
if (sa_family == AF_INET)
udp_del_offload(&gs->udp_offloads);
}
static void __geneve_sock_release(struct geneve_sock *gs)
......
......@@ -551,16 +551,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
return vh;
}
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
struct sk_buff *skb,
struct udp_offload *uoff)
static struct sk_buff **vxlan_gro_receive(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
udp_offloads);
struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
__be32 flags;
struct gro_remcsum grc;
......@@ -613,8 +612,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
return pp;
}
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
struct udp_offload *uoff)
static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
udp_tunnel_gro_complete(skb, nhoff);
......@@ -629,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
struct net *net = sock_net(sk);
sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
int err;
if (sa_family == AF_INET) {
err = udp_add_offload(net, &vs->udp_offloads);
if (err)
pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
}
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
......@@ -662,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
port);
}
rcu_read_unlock();
if (sa_family == AF_INET)
udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
......@@ -2752,21 +2740,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
atomic_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
/* Initialize the vxlan udp offloads structure */
vs->udp_offloads.port = port;
vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL;
tunnel_cfg.gro_receive = vxlan_gro_receive;
tunnel_cfg.gro_complete = vxlan_gro_complete;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
......
......@@ -2159,23 +2159,6 @@ struct packet_offload {
struct list_head list;
};
struct udp_offload;
struct udp_offload_callbacks {
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb,
struct udp_offload *uoff);
int (*gro_complete)(struct sk_buff *skb,
int nhoff,
struct udp_offload *uoff);
};
struct udp_offload {
__be16 port;
u8 ipproto;
struct udp_offload_callbacks callbacks;
};
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
......
......@@ -71,6 +71,14 @@ struct udp_sock {
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
struct sk_buff ** (*gro_receive)(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
int nhoff);
};
static inline struct udp_sock *udp_sk(const struct sock *sk)
......
......@@ -107,9 +107,6 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
void inet_register_protosw(struct inet_protosw *p);
void inet_unregister_protosw(struct inet_protosw *p);
int udp_add_offload(struct net *net, struct udp_offload *prot);
void udp_del_offload(struct udp_offload *prot);
#if IS_ENABLED(CONFIG_IPV6)
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
......
......@@ -322,10 +322,11 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
static inline int inet_iif(const struct sk_buff *skb)
{
int iif = skb_rtable(skb)->rt_iif;
struct rtable *rt = skb_rtable(skb);
if (rt && rt->rt_iif)
return rt->rt_iif;
if (iif)
return iif;
return skb->skb_iif;
}
......
......@@ -167,9 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
struct udphdr *uh);
int udp_gro_complete(struct sk_buff *skb, int nhoff);
struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
......@@ -269,6 +272,8 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif,
struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
......@@ -278,6 +283,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *tbl,
struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport);
/*
* SNMP statistics for UDP and UDP-Lite
......
......@@ -64,6 +64,11 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
int nhoff);
struct udp_tunnel_sock_cfg {
void *sk_user_data; /* user data used by encap_rcv call back */
......@@ -71,6 +76,8 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
};
/* Setup the given (UDP) sock to receive UDP encapsulated packets */
......
......@@ -189,7 +189,6 @@ struct vxlan_sock {
struct rcu_head rcu;
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
struct udp_offload udp_offloads;
u32 flags;
};
......
......@@ -22,7 +22,6 @@ struct fou {
u8 flags;
__be16 port;
u16 type;
struct udp_offload udp_offloads;
struct list_head list;
struct rcu_head rcu;
};
......@@ -186,13 +185,13 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
return 0;
}
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
struct sk_buff *skb,
struct udp_offload *uoff)
static struct sk_buff **fou_gro_receive(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb)
{
const struct net_offload *ops;
struct sk_buff **pp = NULL;
u8 proto = NAPI_GRO_CB(skb)->proto;
u8 proto = fou_from_sock(sk)->protocol;
const struct net_offload **offloads;
/* We can clear the encap_mark for FOU as we are essentially doing
......@@ -217,11 +216,11 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
return pp;
}
static int fou_gro_complete(struct sk_buff *skb, int nhoff,
struct udp_offload *uoff)
static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
const struct net_offload *ops;
u8 proto = NAPI_GRO_CB(skb)->proto;
u8 proto = fou_from_sock(sk)->protocol;
int err = -ENOSYS;
const struct net_offload **offloads;
......@@ -264,9 +263,9 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
return guehdr;
}
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
struct sk_buff *skb,
struct udp_offload *uoff)
static struct sk_buff **gue_gro_receive(struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb)
{
const struct net_offload **offloads;
const struct net_offload *ops;
......@@ -277,7 +276,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
void *data;
u16 doffset = 0;
int flush = 1;
struct fou *fou = container_of(uoff, struct fou, udp_offloads);
struct fou *fou = fou_from_sock(sk);
struct gro_remcsum grc;
skb_gro_remcsum_init(&grc);
......@@ -386,8 +385,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
return pp;
}
static int gue_gro_complete(struct sk_buff *skb, int nhoff,
struct udp_offload *uoff)
static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
const struct net_offload **offloads;
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
......@@ -435,10 +433,7 @@ static int fou_add_to_port_list(struct net *net, struct fou *fou)
static void fou_release(struct fou *fou)
{
struct socket *sock = fou->sock;
struct sock *sk = sock->sk;
if (sk->sk_family == AF_INET)
udp_del_offload(&fou->udp_offloads);
list_del(&fou->list);
udp_tunnel_sock_release(sock);
......@@ -448,11 +443,9 @@ static void fou_release(struct fou *fou)
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
udp_sk(sk)->encap_rcv = fou_udp_recv;
fou->protocol = cfg->protocol;
fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
fou->udp_offloads.port = cfg->udp_config.local_udp_port;
fou->udp_offloads.ipproto = cfg->protocol;
udp_sk(sk)->gro_receive = fou_gro_receive;
udp_sk(sk)->gro_complete = fou_gro_complete;
fou_from_sock(sk)->protocol = cfg->protocol;
return 0;
}
......@@ -460,9 +453,8 @@ static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
udp_sk(sk)->encap_rcv = gue_udp_recv;
fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
fou->udp_offloads.port = cfg->udp_config.local_udp_port;
udp_sk(sk)->gro_receive = gue_gro_receive;
udp_sk(sk)->gro_complete = gue_gro_complete;
return 0;
}
......@@ -521,12 +513,6 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
sk->sk_allocation = GFP_ATOMIC;
if (cfg->udp_config.family == AF_INET) {
err = udp_add_offload(net, &fou->udp_offloads);
if (err)
goto error;
}
err = fou_add_to_port_list(net, fou);
if (err)
goto error;
......
......@@ -604,6 +604,19 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
udptable, skb);
}
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
const struct iphdr *iph = ip_hdr(skb);
const struct net_device *dev =
skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
return __udp4_lib_lookup(dev_net(dev), iph->saddr, sport,
iph->daddr, dport, inet_iif(skb),
&udp_table, skb);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
/* Must be called under rcu_read_lock().
* Does increment socket refcount.
*/
......
......@@ -14,18 +14,6 @@
#include <net/udp.h>
#include <net/protocol.h>
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
struct udp_offload_priv {
struct udp_offload *offload;
possible_net_t net;
struct rcu_head rcu;
struct udp_offload_priv __rcu *next;
};
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
......@@ -179,6 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
......@@ -253,64 +242,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
return segs;
}
int udp_add_offload(struct net *net, struct udp_offload *uo)
{
struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
if (!new_offload)
return -ENOMEM;
write_pnet(&new_offload->net, net);
new_offload->offload = uo;
spin_lock(&udp_offload_lock);
new_offload->next = udp_offload_base;
rcu_assign_pointer(udp_offload_base, new_offload);
spin_unlock(&udp_offload_lock);
return 0;
}
EXPORT_SYMBOL(udp_add_offload);
static void udp_offload_free_routine(struct rcu_head *head)
{
struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
kfree(ou_priv);
}
void udp_del_offload(struct udp_offload *uo)
{
struct udp_offload_priv __rcu **head = &udp_offload_base;
struct udp_offload_priv *uo_priv;
spin_lock(&udp_offload_lock);
uo_priv = udp_deref_protected(*head);
for (; uo_priv != NULL;
uo_priv = udp_deref_protected(*head)) {
if (uo_priv->offload == uo) {
rcu_assign_pointer(*head,
udp_deref_protected(uo_priv->next));
goto unlock;
}
head = &uo_priv->next;
}
pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
spin_unlock(&udp_offload_lock);
if (uo_priv)
call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
struct udphdr *uh)
struct udphdr *uh, udp_lookup_t lookup)
{
struct udp_offload_priv *uo_priv;
struct sk_buff *p, **pp = NULL;
struct udphdr *uh2;
unsigned int off = skb_gro_offset(skb);
int flush = 1;
struct sock *sk;
if (NAPI_GRO_CB(skb)->encap_mark ||
(skb->ip_summed != CHECKSUM_PARTIAL &&
......@@ -322,13 +261,10 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
NAPI_GRO_CB(skb)->encap_mark = 1;
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
uo_priv->offload->port == uh->dest &&
uo_priv->offload->callbacks.gro_receive)
sk = (*lookup)(skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_receive)
goto unflush;
}
goto out_unlock;
unflush:
......@@ -352,9 +288,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
pp = uo_priv->offload->callbacks.gro_receive(head, skb,
uo_priv->offload);
pp = udp_sk(sk)->gro_receive(sk, head, skb);
out_unlock:
rcu_read_unlock();
......@@ -362,6 +296,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
......@@ -383,39 +318,28 @@ static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
inet_gro_compute_pseudo);
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 0;
return udp_gro_receive(head, skb, uh);
return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
flush:
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
int udp_gro_complete(struct sk_buff *skb, int nhoff)
int udp_gro_complete(struct sk_buff *skb, int nhoff,
udp_lookup_t lookup)
{
struct udp_offload_priv *uo_priv;
__be16 newlen = htons(skb->len - nhoff);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
int err = -ENOSYS;
struct sock *sk;
uh->len = newlen;
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
uo_priv->offload->port == uh->dest &&
uo_priv->offload->callbacks.gro_complete)
break;
}
if (uo_priv) {
NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
err = uo_priv->offload->callbacks.gro_complete(skb,
nhoff + sizeof(struct udphdr),
uo_priv->offload);
}
sk = (*lookup)(skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_complete)
err = udp_sk(sk)->gro_complete(sk, skb,
nhoff + sizeof(struct udphdr));
rcu_read_unlock();
if (skb->remcsum_offload)
......@@ -426,6 +350,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
return err;
}
EXPORT_SYMBOL(udp_gro_complete);
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
......@@ -440,7 +365,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}
return udp_gro_complete(skb, nhoff);
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
static const struct net_offload udpv4_offload = {
......
......@@ -69,6 +69,8 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->encap_type = cfg->encap_type;
udp_sk(sk)->encap_rcv = cfg->encap_rcv;
udp_sk(sk)->encap_destroy = cfg->encap_destroy;
udp_sk(sk)->gro_receive = cfg->gro_receive;
udp_sk(sk)->gro_complete = cfg->gro_complete;
udp_tunnel_encap_enable(sock);
}
......
......@@ -8,9 +8,10 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
addrlabel.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
udp_offload.o
ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o
ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
......
......@@ -64,6 +64,8 @@
#include <asm/uaccess.h>
#include <linux/mroute6.h>
#include "ip6_offload.h"
MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");
......@@ -959,6 +961,10 @@ static int __init inet6_init(void)
if (err)
goto udplitev6_fail;
err = udpv6_offload_init();
if (err)
goto udpv6_offload_fail;
err = tcpv6_init();
if (err)
goto tcpv6_fail;
......@@ -988,6 +994,8 @@ static int __init inet6_init(void)
ipv6_packet_fail:
tcpv6_exit();
tcpv6_fail:
udpv6_offload_exit();
udpv6_offload_fail:
udplitev6_exit();
udplitev6_fail:
udpv6_exit();
......
......@@ -325,8 +325,6 @@ static int __init ipv6_offload_init(void)
if (tcpv6_offload_init() < 0)
pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
if (udp_offload_init() < 0)
pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
......
......@@ -12,7 +12,8 @@
#define __ip6_offload_h
int ipv6_exthdrs_offload_init(void);
int udp_offload_init(void);
int udpv6_offload_init(void);
int udpv6_offload_exit(void);
int tcpv6_offload_init(void);
#endif
......@@ -326,6 +326,19 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
udptable, skb);
}
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct net_device *dev =
skb_dst(skb) ? skb_dst(skb)->dev : skb->dev;
return __udp6_lib_lookup(dev_net(dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
&udp_table, skb);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
/* Must be called under rcu_read_lock().
* Does increment socket refcount.
*/
......
......@@ -153,7 +153,7 @@ static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 1;
return udp_gro_receive(head, skb, uh);
return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
flush:
NAPI_GRO_CB(skb)->flush = 1;
......@@ -173,7 +173,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}
return udp_gro_complete(skb, nhoff);
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
static const struct net_offload udpv6_offload = {
......@@ -184,7 +184,12 @@ static const struct net_offload udpv6_offload = {
},
};
int __init udp_offload_init(void)
int udpv6_offload_init(void)
{
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
}