Commit 53481da3 authored by David S. Miller

Merge branch 'ipip_gso'

Eric Dumazet says:

====================
net: Implement GSO/TSO support for IPIP

This patch series implements GSO/TSO support for IPIP.

David, please note it applies after "ipv4: gso: send_check() & segment() cleanups"
( http://patchwork.ozlabs.org/patch/284714/ )

The Broadcom bnx2x driver is now enabled for TSO support of IPIP traffic.

Before patch:

lpq83:~# ./netperf -H 7.7.9.84 -Cc
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 7.7.9.84 () port 0 AF_INET
Recv   Send    Send                          Utilization       Service Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local   remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % S      us/KB   us/KB

 87380  16384  16384    10.00      3357.88   5.09     3.70     2.983   2.167

After patch:

lpq83:~# ./netperf -H 7.7.9.84 -Cc
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 7.7.9.84 () port 0 AF_INET
Recv   Send    Send                          Utilization       Service Demand
Socket Socket  Message  Elapsed              Send     Recv     Send    Recv
Size   Size    Size     Time     Throughput  local    remote   local   remote
bytes  bytes   bytes    secs.    10^6bits/s  % S      % S      us/KB   us/KB

 87380  16384  16384    10.00      8532.40   2.55     7.73     0.588   1.781
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b917eb15 117401ee
@@ -12260,10 +12260,12 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 	if (!CHIP_IS_E1x(bp)) {
-		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_IPIP;
 		dev->hw_enc_features =
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+			NETIF_F_GSO_IPIP |
 			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 	}
......
@@ -42,6 +42,7 @@ enum {
 	NETIF_F_TSO6_BIT,		/* ... TCPv6 segmentation */
 	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
 	NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
+	NETIF_F_GSO_IPIP_BIT,		/* ... IPIP tunnel with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
 	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
@@ -107,6 +108,7 @@ enum {
 #define NETIF_F_RXFCS		__NETIF_F(RXFCS)
 #define NETIF_F_RXALL		__NETIF_F(RXALL)
 #define NETIF_F_GSO_GRE		__NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
......
@@ -318,9 +318,11 @@ enum {
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 7,
+	SKB_GSO_IPIP = 1 << 7,
 
-	SKB_GSO_MPLS = 1 << 8,
+	SKB_GSO_UDP_TUNNEL = 1 << 8,
+
+	SKB_GSO_MPLS = 1 << 9,
 };
 
 #if BITS_PER_LONG > 32
@@ -2722,9 +2724,12 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 /* Keeps track of mac header offset relative to skb->head.
  * It is useful for TSO of Tunneling protocol. e.g. GRE.
  * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
 struct skb_gso_cb {
-	int mac_offset;
+	int	mac_offset;
+	int	encap_level;
 };
 
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
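
A note on how the new field is used (inferred from the __skb_gso_segment()
and inet_gso_segment() hunks below; the helper shown is hypothetical, not part
of this commit): the outermost segmentation pass starts with encap_level == 0,
and each IPv4 pass adds its header length, so a nested (IPIP) pass can tell it
is operating on an inner header:

	/* hypothetical helper, illustrating the intended invariant */
	static inline bool skb_gso_inner_pass(const struct sk_buff *skb)
	{
		return SKB_GSO_CB(skb)->encap_level > 0;
	}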
......
@@ -38,7 +38,13 @@ void gre_offload_exit(void);
 
 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 		      int hdr_len);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+						  bool gre_csum)
+{
+	return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE);
+}
 
 static inline int ip_gre_calc_hlen(__be16 o_flags)
 {
......
@@ -150,6 +150,9 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+					 int gso_type_mask);
+
 static inline void iptunnel_xmit_stats(int err,
 		struct net_device_stats *err_stats,
 		struct pcpu_tstats __percpu *stats)
......
@@ -2377,6 +2377,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 	}
 
 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+	SKB_GSO_CB(skb)->encap_level = 0;
+
 	skb_reset_mac_header(skb);
 	skb_reset_mac_len(skb);
......
@@ -81,6 +81,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
 	[NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
 	[NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
+	[NETIF_F_GSO_IPIP_BIT] =         "tx-ipip-segmentation",
 	[NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
 	[NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",
......
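Once this string is registered, the new feature appears in "ethtool -k <dev>"
output as tx-ipip-segmentation and, following standard ethtool feature
semantics, can be toggled with "ethtool -K <dev> tx-ipip-segmentation on|off".
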
@@ -903,6 +903,9 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += off;
 	/* {transport,network,mac}_header and tail are relative to skb->head */
 	skb->transport_header += off;
 	skb->network_header   += off;
@@ -1109,9 +1112,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
 	skb->tail	      += off;
 	skb_headers_offset_update(skb, nhead);
-	/* Only adjust this if it actually is csum_start rather than csum */
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		skb->csum_start += nhead;
 	skb->cloned   = 0;
 	skb->hdr_len  = 0;
 	skb->nohdr    = 0;
@@ -1176,7 +1176,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 					NUMA_NO_NODE);
 	int oldheadroom = skb_headroom(skb);
 	int head_copy_len, head_copy_off;
-	int off;
 
 	if (!n)
 		return NULL;
@@ -1200,11 +1199,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	copy_skb_header(n, skb);
 
-	off                  = newheadroom - oldheadroom;
-	if (n->ip_summed == CHECKSUM_PARTIAL)
-		n->csum_start += off;
-
-	skb_headers_offset_update(n, off);
+	skb_headers_offset_update(n, newheadroom - oldheadroom);
 
 	return n;
 }
@@ -2837,14 +2832,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
-		/* nskb and skb might have different headroom */
-		if (nskb->ip_summed == CHECKSUM_PARTIAL)
-			nskb->csum_start += skb_headroom(nskb) - headroom;
-
-		skb_reset_mac_header(nskb);
-		skb_set_network_header(nskb, skb->mac_len);
-		nskb->transport_header = (nskb->network_header +
-					  skb_network_header_len(skb));
+		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
......
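Design note on the three skbuff.c hunks above: the open-coded csum_start
fixups in pskb_expand_head(), skb_copy_expand() and skb_segment() are folded
into skb_headers_offset_update(), so any caller that shifts headers relative
to skb->head now adjusts csum_start together with the mac/network/transport
offsets through a single helper, e.g. (pattern taken from the skb_segment()
hunk):

	/* nskb and skb might have different headroom */
	skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
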
@@ -1273,16 +1273,17 @@ static int inet_gso_send_check(struct sk_buff *skb)
 }
 
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
-	netdev_features_t features)
+					netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	const struct net_offload *ops;
-	unsigned int offset = 0;
 	struct iphdr *iph;
-	bool tunnel;
 	int proto;
+	int nhoff;
 	int ihl;
 	int id;
+	unsigned int offset = 0;
+	bool tunnel;
 
 	if (unlikely(skb_shinfo(skb)->gso_type &
 		     ~(SKB_GSO_TCPV4 |
@@ -1290,12 +1291,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		     SKB_GSO_DODGY |
 		     SKB_GSO_TCP_ECN |
 		     SKB_GSO_GRE |
+		     SKB_GSO_IPIP |
 		     SKB_GSO_TCPV6 |
 		     SKB_GSO_UDP_TUNNEL |
 		     SKB_GSO_MPLS |
 		     0)))
 		goto out;
 
+	skb_reset_network_header(skb);
+	nhoff = skb_network_header(skb) - skb_mac_header(skb);
 	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 		goto out;
@@ -1312,7 +1316,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		goto out;
 
 	__skb_pull(skb, ihl);
-	tunnel = !!skb->encapsulation;
+	tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+	if (tunnel)
+		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	SKB_GSO_CB(skb)->encap_level += ihl;
 
 	skb_reset_transport_header(skb);
@@ -1327,18 +1334,23 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 	skb = segs;
 	do {
-		iph = ip_hdr(skb);
+		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
 		if (!tunnel && proto == IPPROTO_UDP) {
 			iph->id = htons(id);
 			iph->frag_off = htons(offset >> 3);
 			if (skb->next != NULL)
 				iph->frag_off |= htons(IP_MF);
-			offset += (skb->len - skb->mac_len - iph->ihl * 4);
+			offset += skb->len - nhoff - ihl;
 		} else {
 			iph->id = htons(id++);
 		}
-		iph->tot_len = htons(skb->len - skb->mac_len);
+		iph->tot_len = htons(skb->len - nhoff);
 		ip_send_check(iph);
+		if (tunnel) {
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}
+		skb->network_header = (u8 *)iph - skb->head;
 	} while ((skb = skb->next));
 
 out:
@@ -1645,6 +1657,13 @@ static struct packet_offload ip_packet_offload __read_mostly = {
 	},
 };
 
+static const struct net_offload ipip_offload = {
+	.callbacks = {
+		.gso_send_check = inet_gso_send_check,
+		.gso_segment = inet_gso_segment,
+	},
+};
+
 static int __init ipv4_offload_init(void)
 {
 	/*
@@ -1656,6 +1675,7 @@ static int __init ipv4_offload_init(void)
 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
 
 	dev_add_offload(&ip_packet_offload);
+	inet_add_offload(&ipip_offload, IPPROTO_IPIP);
 	return 0;
 }
......
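Registering inet_gso_segment() itself as the IPPROTO_IPIP offload is what
makes the scheme stackable: when the outer IPv4 pass dispatches to the inner
protocol's handler, an IPIP packet simply re-enters inet_gso_segment() for the
inner header. The dispatch looks roughly like this (a paraphrase of the
existing lookup inside inet_gso_segment(), not code added by this commit):

	proto = iph->protocol;
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);
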
@@ -93,35 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
-	int err;
-
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
-
-	if (skb_is_gso(skb)) {
-		err = skb_unclone(skb, GFP_ATOMIC);
-		if (unlikely(err))
-			goto error;
-		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
-		return skb;
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
-		err = skb_checksum_help(skb);
-		if (unlikely(err))
-			goto error;
-	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	return skb;
-
-error:
-	kfree_skb(skb);
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
-
 static __sum16 check_checksum(struct sk_buff *skb)
 {
 	__sum16 csum = 0;
......
@@ -39,7 +39,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
-				  SKB_GSO_GRE)))
+				  SKB_GSO_GRE |
+				  SKB_GSO_IPIP)))
 		goto out;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
......
@@ -116,3 +116,36 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
+					 bool csum_help,
+					 int gso_type_mask)
+{
+	int err;
+
+	if (likely(!skb->encapsulation)) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= gso_type_mask;
+		return skb;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+		err = skb_checksum_help(skb);
+		if (unlikely(err))
+			goto error;
+	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
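
With the helper exported, any encapsulation can request offload handling the
same way; a hypothetical tunnel transmit path (the FOO names are illustrative
only, not part of this commit) would do:

	skb = iptunnel_handle_offloads(skb, foo_needs_csum, SKB_GSO_FOO);
	if (IS_ERR(skb))
		goto out;	/* helper already freed the skb on error */
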
@@ -220,17 +220,17 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(skb->protocol != htons(ETH_P_IP)))
 		goto tx_error;
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+	if (IS_ERR(skb))
+		goto out;
 
 	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
 	return NETDEV_TX_OK;
 
 tx_error:
-	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
+out:
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
@@ -275,6 +275,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 #define IPIP_FEATURES (NETIF_F_SG |		\
 		       NETIF_F_FRAGLIST |	\
 		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_GSO_SOFTWARE |	\
 		       NETIF_F_HW_CSUM)
 
 static void ipip_tunnel_setup(struct net_device *dev)
......
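For context on the hunk above: NETIF_F_GSO_SOFTWARE bundles the
software-emulated segmentation features, so the ipip device now advertises
TSO even when segmentation ends up being done in software. In kernels of this
era the define expands to roughly the following (quoted from contemporary
netdev_features.h as an assumption, not part of this diff):

	#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
					 NETIF_F_TSO6 | NETIF_F_UFO)
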
@@ -56,6 +56,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 			       SKB_GSO_TCP_ECN |
 			       SKB_GSO_TCPV6 |
 			       SKB_GSO_GRE |
+			       SKB_GSO_IPIP |
 			       SKB_GSO_MPLS |
 			       SKB_GSO_UDP_TUNNEL |
 			       0) ||
......
@@ -52,6 +52,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 			      SKB_GSO_UDP_TUNNEL |
+			      SKB_GSO_IPIP |
 			      SKB_GSO_GRE | SKB_GSO_MPLS) ||
 		     !(type & (SKB_GSO_UDP))))
 		goto out;
......
@@ -96,6 +96,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_IPIP |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_MPLS |
 		       SKB_GSO_TCPV6 |
......
@@ -64,6 +64,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		      SKB_GSO_DODGY |
 		      SKB_GSO_UDP_TUNNEL |
 		      SKB_GSO_GRE |
+		      SKB_GSO_IPIP |
 		      SKB_GSO_MPLS) ||
 		     !(type & (SKB_GSO_UDP))))
 		goto out;
......
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
 				  SKB_GSO_GRE |
+				  SKB_GSO_IPIP |
 				  SKB_GSO_MPLS)))
 		goto out;
......
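The SKB_GSO_IPIP additions above all follow one pattern: each segmentation
handler (tcp_gso_segment(), udp4_ufo_fragment(), ipv6_gso_segment(),
udp6_ufo_fragment(), mpls_gso_segment()) begins by rejecting any gso_type bit
it does not recognize, so the new bit must be whitelisted everywhere or
IPIP-encapsulated GSO packets would be dropped by the usual guard:

	if (unlikely(skb_shinfo(skb)->gso_type & ~(/* supported SKB_GSO_* bits */)))
		goto out;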