Commit 9fb426a6 authored by David S. Miller

Merge branch 'gso_send_check'

Tom Herbert says:

====================
net: Eliminate gso_send_check

gso_send_check presents a lot of complexity for what it is being used
for. It seems that there are only two cases where it might be effective:
TCP and UFO paths. In these cases, the gso_send_check function
initializes the TCP or UDP checksum respectively to the pseudo header
checksum so that the checksum computation is appropriately offloaded or
computed in the gso_segment functions. The gso_send_check functions
are only called from dev.c in skb_mac_gso_segment when ip_summed !=
CHECKSUM_PARTIAL (which seems very unlikely in TCP case). We can move
the logic of this into the respective gso_segment functions where the
checksum is initialized if ip_summed != CHECKSUM_PARTIAL.

With the above cases handled, gso_send_check is no longer needed, so
we can remove all uses of it and the fields in the offload callbacks.
With this change, ip_summed in the skb should be preserved through all
the layers of gso_segment calls.

In follow-on patches, we may be able to remove the checksum setup code in
tcp_gso_segment if we can guarantee that ip_summed will always be
CHECKSUM_PARTIAL (verify all paths and probably add an assert in
tcp_gso_segment).

Tested these patches by:
  - netperf TCP_STREAM test with GSO enabled
  - Forced ip_summed != CHECKSUM_PARTIAL with above
  - Ran UDP_RR with 10000 request size over GRE tunnel. This exercised
    UFO path.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2fdbfea5 53e50398
...@@ -1911,7 +1911,6 @@ struct packet_type { ...@@ -1911,7 +1911,6 @@ struct packet_type {
struct offload_callbacks { struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb, struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features); netdev_features_t features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb); struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff); int (*gro_complete)(struct sk_buff *skb, int nhoff);
......
...@@ -2422,16 +2422,6 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, ...@@ -2422,16 +2422,6 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) { list_for_each_entry_rcu(ptype, &offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) { if (ptype->type == type && ptype->callbacks.gso_segment) {
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
int err;
err = ptype->callbacks.gso_send_check(skb);
segs = ERR_PTR(err);
if (err || skb_gso_ok(skb, features))
break;
__skb_push(skb, (skb->data -
skb_network_header(skb)));
}
segs = ptype->callbacks.gso_segment(skb, features); segs = ptype->callbacks.gso_segment(skb, features);
break; break;
} }
......
...@@ -1197,40 +1197,6 @@ int inet_sk_rebuild_header(struct sock *sk) ...@@ -1197,40 +1197,6 @@ int inet_sk_rebuild_header(struct sock *sk)
} }
EXPORT_SYMBOL(inet_sk_rebuild_header); EXPORT_SYMBOL(inet_sk_rebuild_header);
/* inet_gso_send_check - IPv4 pre-GSO checksum setup (removed by this commit).
 * Validates and pulls the IPv4 header, then delegates to the upper
 * protocol's gso_send_check callback so the transport checksum can be
 * primed before segmentation. Returns 0 on success, -EINVAL for a
 * malformed header, or -EPROTONOSUPPORT if the inner protocol has no
 * gso_send_check callback.
 */
static int inet_gso_send_check(struct sk_buff *skb)
{
const struct net_offload *ops;
const struct iphdr *iph;
int proto;
int ihl;
int err = -EINVAL;
/* Make sure the fixed-size IPv4 header is in the linear area. */
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
/* ihl is in 32-bit words; anything below 20 bytes is malformed. */
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
proto = iph->protocol;
/* Warning: after this point, iph might be no longer valid */
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
/* Advance past the IP header so the L4 handler sees its own header. */
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
err = -EPROTONOSUPPORT;
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_send_check))
err = ops->callbacks.gso_send_check(skb);
out:
return err;
}
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -1655,7 +1621,6 @@ static int ipv4_proc_init(void); ...@@ -1655,7 +1621,6 @@ static int ipv4_proc_init(void);
static struct packet_offload ip_packet_offload __read_mostly = { static struct packet_offload ip_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IP), .type = cpu_to_be16(ETH_P_IP),
.callbacks = { .callbacks = {
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment, .gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive, .gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete, .gro_complete = inet_gro_complete,
...@@ -1664,7 +1629,6 @@ static struct packet_offload ip_packet_offload __read_mostly = { ...@@ -1664,7 +1629,6 @@ static struct packet_offload ip_packet_offload __read_mostly = {
static const struct net_offload ipip_offload = { static const struct net_offload ipip_offload = {
.callbacks = { .callbacks = {
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment, .gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive, .gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete, .gro_complete = inet_gro_complete,
......
...@@ -15,13 +15,6 @@ ...@@ -15,13 +15,6 @@
#include <net/protocol.h> #include <net/protocol.h>
#include <net/gre.h> #include <net/gre.h>
/* gre_gso_send_check - GRE pre-GSO validity check (removed by this commit).
 * GSO over GRE is only valid for packets built through the tunnel path,
 * which sets skb->encapsulation; reject anything else. The equivalent
 * check is moved into gre_gso_segment() below.
 */
static int gre_gso_send_check(struct sk_buff *skb)
{
if (!skb->encapsulation)
return -EINVAL;
return 0;
}
static struct sk_buff *gre_gso_segment(struct sk_buff *skb, static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -46,6 +39,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, ...@@ -46,6 +39,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
SKB_GSO_IPIP))) SKB_GSO_IPIP)))
goto out; goto out;
if (!skb->encapsulation)
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
goto out; goto out;
...@@ -256,7 +252,6 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -256,7 +252,6 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload gre_offload = { static const struct net_offload gre_offload = {
.callbacks = { .callbacks = {
.gso_send_check = gre_gso_send_check,
.gso_segment = gre_gso_segment, .gso_segment = gre_gso_segment,
.gro_receive = gre_gro_receive, .gro_receive = gre_gro_receive,
.gro_complete = gre_gro_complete, .gro_complete = gre_gro_complete,
......
...@@ -29,6 +29,28 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, ...@@ -29,6 +29,28 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
} }
} }
/* tcp4_gso_segment - IPv4 TCP entry point for GSO segmentation (added by
 * this commit). Absorbs the old tcp_v4_gso_send_check(): if the stack did
 * not already leave the skb with CHECKSUM_PARTIAL, initialize th->check
 * to the pseudo-header checksum here, then hand off to the protocol-
 * independent tcp_gso_segment(). Returns the segment list or ERR_PTR.
 */
struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
/* Need the fixed TCP header in the linear area before touching it. */
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
/* Set up checksum pseudo header, usually expect stack to
 * have done this already.
 */
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
}
return tcp_gso_segment(skb, features);
}
struct sk_buff *tcp_gso_segment(struct sk_buff *skb, struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -44,9 +66,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, ...@@ -44,9 +66,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
__sum16 newcheck; __sum16 newcheck;
bool ooo_okay, copy_destructor; bool ooo_okay, copy_destructor;
if (!pskb_may_pull(skb, sizeof(*th)))
goto out;
th = tcp_hdr(skb); th = tcp_hdr(skb);
thlen = th->doff * 4; thlen = th->doff * 4;
if (thlen < sizeof(*th)) if (thlen < sizeof(*th))
...@@ -269,23 +288,6 @@ int tcp_gro_complete(struct sk_buff *skb) ...@@ -269,23 +288,6 @@ int tcp_gro_complete(struct sk_buff *skb)
} }
EXPORT_SYMBOL(tcp_gro_complete); EXPORT_SYMBOL(tcp_gro_complete);
/* tcp_v4_gso_send_check - removed by this commit; its logic now lives in
 * tcp4_gso_segment(). Seeds th->check with the IPv4 pseudo-header
 * checksum and marks the skb CHECKSUM_PARTIAL so segmentation can
 * offload or complete the checksum.
 */
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
struct tcphdr *th;
if (!pskb_may_pull(skb, sizeof(*th)))
return -EINVAL;
iph = ip_hdr(skb);
th = tcp_hdr(skb);
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
return 0;
}
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{ {
/* Don't bother verifying checksum if we're going to flush anyway. */ /* Don't bother verifying checksum if we're going to flush anyway. */
...@@ -313,8 +315,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff) ...@@ -313,8 +315,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
static const struct net_offload tcpv4_offload = { static const struct net_offload tcpv4_offload = {
.callbacks = { .callbacks = {
.gso_send_check = tcp_v4_gso_send_check, .gso_segment = tcp4_gso_segment,
.gso_segment = tcp_gso_segment,
.gro_receive = tcp4_gro_receive, .gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete, .gro_complete = tcp4_gro_complete,
}, },
......
...@@ -25,28 +25,6 @@ struct udp_offload_priv { ...@@ -25,28 +25,6 @@ struct udp_offload_priv {
struct udp_offload_priv __rcu *next; struct udp_offload_priv __rcu *next;
}; };
/* udp4_ufo_send_check - removed by this commit; udp4_ufo_fragment() now
 * computes the full UDP checksum itself. Seeded uh->check with the IPv4
 * pseudo-header checksum and set CHECKSUM_PARTIAL for non-encapsulated
 * packets; encapsulated packets were left untouched.
 */
static int udp4_ufo_send_check(struct sk_buff *skb)
{
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
return -EINVAL;
if (likely(!skb->encapsulation)) {
const struct iphdr *iph;
struct udphdr *uh;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
/* Pseudo-header sum, inverted per CHECKSUM_PARTIAL convention. */
uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
IPPROTO_UDP, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
}
return 0;
}
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -128,8 +106,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, ...@@ -128,8 +106,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
{ {
struct sk_buff *segs = ERR_PTR(-EINVAL); struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss; unsigned int mss;
int offset;
__wsum csum; __wsum csum;
struct udphdr *uh;
struct iphdr *iph;
if (skb->encapsulation && if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type & (skb_shinfo(skb)->gso_type &
...@@ -138,6 +117,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, ...@@ -138,6 +117,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
goto out; goto out;
} }
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
mss = skb_shinfo(skb)->gso_size; mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss)) if (unlikely(skb->len <= mss))
goto out; goto out;
...@@ -165,10 +147,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, ...@@ -165,10 +147,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
* HW cannot do checksum of UDP packets sent as multiple * HW cannot do checksum of UDP packets sent as multiple
* IP fragments. * IP fragments.
*/ */
offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0); uh = udp_hdr(skb);
offset += skb->csum_offset; iph = ip_hdr(skb);
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
/* Fragment the skb. IP headers of the fragments are updated in /* Fragment the skb. IP headers of the fragments are updated in
...@@ -353,7 +341,6 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -353,7 +341,6 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv4_offload = { static const struct net_offload udpv4_offload = {
.callbacks = { .callbacks = {
.gso_send_check = udp4_ufo_send_check,
.gso_segment = udp4_ufo_fragment, .gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive, .gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete, .gro_complete = udp4_gro_complete,
......
...@@ -53,31 +53,6 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) ...@@ -53,31 +53,6 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
return proto; return proto;
} }
/* ipv6_gso_send_check - IPv6 pre-GSO checksum setup (removed by this
 * commit). Pulls the IPv6 header plus any extension headers and
 * delegates to the upper protocol's gso_send_check callback.
 * Returns 0 on success, -EINVAL for a short packet, or
 * -EPROTONOSUPPORT if the inner protocol has no callback.
 */
static int ipv6_gso_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
const struct net_offload *ops;
int err = -EINVAL;
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
ipv6h = ipv6_hdr(skb);
__skb_pull(skb, sizeof(*ipv6h));
err = -EPROTONOSUPPORT;
/* Skip extension headers to find the real transport protocol. */
ops = rcu_dereference(inet6_offloads[
ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
if (likely(ops && ops->callbacks.gso_send_check)) {
skb_reset_transport_header(skb);
err = ops->callbacks.gso_send_check(skb);
}
out:
return err;
}
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -306,7 +281,6 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -306,7 +281,6 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
static struct packet_offload ipv6_packet_offload __read_mostly = { static struct packet_offload ipv6_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6), .type = cpu_to_be16(ETH_P_IPV6),
.callbacks = { .callbacks = {
.gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment, .gso_segment = ipv6_gso_segment,
.gro_receive = ipv6_gro_receive, .gro_receive = ipv6_gro_receive,
.gro_complete = ipv6_gro_complete, .gro_complete = ipv6_gro_complete,
...@@ -315,7 +289,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { ...@@ -315,7 +289,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
static const struct net_offload sit_offload = { static const struct net_offload sit_offload = {
.callbacks = { .callbacks = {
.gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment, .gso_segment = ipv6_gso_segment,
.gro_receive = ipv6_gro_receive, .gro_receive = ipv6_gro_receive,
.gro_complete = ipv6_gro_complete, .gro_complete = ipv6_gro_complete,
......
...@@ -15,23 +15,6 @@ ...@@ -15,23 +15,6 @@
#include <net/ip6_checksum.h> #include <net/ip6_checksum.h>
#include "ip6_offload.h" #include "ip6_offload.h"
/* tcp_v6_gso_send_check - removed by this commit; its logic now lives in
 * tcp6_gso_segment(). Seeds th->check with the IPv6 pseudo-header
 * checksum and marks the skb CHECKSUM_PARTIAL.
 */
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
struct tcphdr *th;
if (!pskb_may_pull(skb, sizeof(*th)))
return -EINVAL;
ipv6h = ipv6_hdr(skb);
th = tcp_hdr(skb);
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -58,10 +41,32 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff) ...@@ -58,10 +41,32 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
return tcp_gro_complete(skb); return tcp_gro_complete(skb);
} }
/* tcp6_gso_segment - IPv6 TCP entry point for GSO segmentation (added by
 * this commit). Mirrors tcp4_gso_segment(): if the stack did not leave
 * the skb with CHECKSUM_PARTIAL, seed th->check with the IPv6 pseudo-
 * header checksum, then hand off to the protocol-independent
 * tcp_gso_segment(). Returns the segment list or ERR_PTR.
 */
struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct tcphdr *th;
/* Outer th is only used for sizeof in the pull check below. */
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
/* NOTE(review): this declaration shadows the outer th; harmless,
 * since the outer one is never read after the pull check.
 */
struct tcphdr *th = tcp_hdr(skb);
/* Set up pseudo header, usually expect stack to have done
 * this.
 */
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
}
return tcp_gso_segment(skb, features);
}
static const struct net_offload tcpv6_offload = { static const struct net_offload tcpv6_offload = {
.callbacks = { .callbacks = {
.gso_send_check = tcp_v6_gso_send_check, .gso_segment = tcp6_gso_segment,
.gso_segment = tcp_gso_segment,
.gro_receive = tcp6_gro_receive, .gro_receive = tcp6_gro_receive,
.gro_complete = tcp6_gro_complete, .gro_complete = tcp6_gro_complete,
}, },
......
...@@ -17,28 +17,6 @@ ...@@ -17,28 +17,6 @@
#include <net/ip6_checksum.h> #include <net/ip6_checksum.h>
#include "ip6_offload.h" #include "ip6_offload.h"
/* udp6_ufo_send_check - removed by this commit; udp6_ufo_fragment() now
 * computes the full UDP checksum itself. Seeded uh->check with the IPv6
 * pseudo-header checksum and set CHECKSUM_PARTIAL for non-encapsulated
 * packets; encapsulated packets were left untouched.
 */
static int udp6_ufo_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(*uh)))
return -EINVAL;
if (likely(!skb->encapsulation)) {
ipv6h = ipv6_hdr(skb);
uh = udp_hdr(skb);
/* Pseudo-header sum, inverted per CHECKSUM_PARTIAL convention. */
uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
IPPROTO_UDP, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
}
return 0;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features) netdev_features_t features)
{ {
...@@ -49,7 +27,6 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, ...@@ -49,7 +27,6 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 *packet_start, *prevhdr; u8 *packet_start, *prevhdr;
u8 nexthdr; u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr); u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum; __wsum csum;
int tnl_hlen; int tnl_hlen;
...@@ -83,13 +60,27 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, ...@@ -83,13 +60,27 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features); segs = skb_udp_tunnel_segment(skb, features);
else { else {
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments. * do checksum of UDP packets sent as multiple IP fragments.
*/ */
offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0); uh = udp_hdr(skb);
offset += skb->csum_offset; ipv6h = ipv6_hdr(skb);
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
&ipv6h->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */ /* Check if there is enough headroom to insert fragment header. */
...@@ -170,7 +161,6 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -170,7 +161,6 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv6_offload = { static const struct net_offload udpv6_offload = {
.callbacks = { .callbacks = {
.gso_send_check = udp6_ufo_send_check,
.gso_segment = udp6_ufo_fragment, .gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive, .gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete, .gro_complete = udp6_gro_complete,
......
...@@ -65,15 +65,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, ...@@ -65,15 +65,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
return segs; return segs;
} }
/* mpls_gso_send_check - removed by this commit. MPLS never needed any
 * pre-GSO checksum setup; this was a stub required only because the
 * gso_send_check callback existed in struct offload_callbacks.
 */
static int mpls_gso_send_check(struct sk_buff *skb)
{
return 0;
}
static struct packet_offload mpls_mc_offload = { static struct packet_offload mpls_mc_offload = {
.type = cpu_to_be16(ETH_P_MPLS_MC), .type = cpu_to_be16(ETH_P_MPLS_MC),
.callbacks = { .callbacks = {
.gso_send_check = mpls_gso_send_check,
.gso_segment = mpls_gso_segment, .gso_segment = mpls_gso_segment,
}, },
}; };
...@@ -81,7 +75,6 @@ static struct packet_offload mpls_mc_offload = { ...@@ -81,7 +75,6 @@ static struct packet_offload mpls_mc_offload = {
static struct packet_offload mpls_uc_offload = { static struct packet_offload mpls_uc_offload = {
.type = cpu_to_be16(ETH_P_MPLS_UC), .type = cpu_to_be16(ETH_P_MPLS_UC),
.callbacks = { .callbacks = {
.gso_send_check = mpls_gso_send_check,
.gso_segment = mpls_gso_segment, .gso_segment = mpls_gso_segment,
}, },
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment