Commit f83ef8c0 authored by Herbert Xu, committed by David S. Miller

[IPV6]: Added GSO support for TCPv6

This patch adds GSO support for IPv6 and TCPv6.  This is based on a patch
by Ananda Raju <Ananda.Raju@neterion.com>.  His original description is:

	This patch enables TSO over IPv6. Currently the Linux network stack
	restricts TSO over IPv6 by clearing the NETIF_F_TSO bit from
	"dev->features". This patch removes that restriction.

	This patch introduces a new flag, NETIF_F_TSO6, which is used to
	check whether a device supports TSO over IPv6. If the device supports
	TSO over IPv6 then NETIF_F_TSO is not cleared, which lets the TCP
	layer create TSO packets. Any device supporting TSO over IPv6 sets
	the NETIF_F_TSO6 flag in "dev->features" along with NETIF_F_TSO.

	If the user disables TSO using ethtool, NETIF_F_TSO is cleared from
	"dev->features", so even though NETIF_F_TSO6 is still set, the TCP
	layer no longer creates TSO packets.

	SKB_GSO_TCPV4 renamed to SKB_GSO_TCP to make it a generic GSO flag.
	SKB_GSO_UDPV4 renamed to SKB_GSO_UDP, as UFO is not an IPv4-only
	feature; UFO is supported over IPv6 as well.

	The following table shows the significant improvement in throughput
	with normal frames, and in CPU utilisation (the CPU columns show
	percent idle) for both normal and jumbo frames.

	---------------------------------------------------
	|          |    MTU 1500     |     MTU 9600      |
	|          |-----------------|-------------------|
	|          | thru     CPU    |  thru     CPU     |
	---------------------------------------------------
	| TSO OFF  | 2.00   5.5% id  |  5.66   20.0% id  |
	---------------------------------------------------
	| TSO ON   | 2.63  78.0% id  |  5.67   39.0% id  |
	---------------------------------------------------
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bcd76111
@@ -3960,7 +3960,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Control_2 = 0;
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->gso_size;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) {
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 		txdp->Control_1 |= TXD_TCP_LSO_EN;
 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
 	}
@@ -3980,7 +3980,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	frg_len = skb->len - skb->data_len;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4) {
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
 		int ufo_size;
 		ufo_size = skb_shinfo(skb)->gso_size;
@@ -4009,7 +4009,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		txdp->Control_1 |= TXD_UFO_EN;
 	frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -4024,12 +4024,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			(sp->pdev, frag->page, frag->page_offset,
 			 frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		frg_cnt++; /* as Txd0 was used for inband header */
 	tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -4043,7 +4043,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mss)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 #endif
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDPV4)
+	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 	writeq(val64, &tx_fifo->List_Control);
@@ -7020,6 +7020,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		dev->features |= NETIF_F_HIGHDMA;
 #ifdef NETIF_F_TSO
 	dev->features |= NETIF_F_TSO;
+#endif
+#ifdef NETIF_F_TSO6
+	dev->features |= NETIF_F_TSO6;
 #endif
 	if (sp->device_type & XFRAME_II_DEVICE) {
 		dev->features |= NETIF_F_UFO;

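Since gso_type is a bitmask and more than one flag can be set on a single skb (SKB_GSO_TCP_ECN, for example, may be OR'd into a TCPv4 or TCPv6 super-packet), a driver's transmit path normally tests individual bits rather than comparing the whole field for equality. A minimal illustrative sketch of that pattern follows; "txd", setup_lso() and setup_ufo() are hypothetical placeholders, not s2io code:

	/* Sketch only: how a transmit routine can key off gso_type.
	 * txd, setup_lso() and setup_ufo() are hypothetical placeholders. */
	unsigned int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
		setup_lso(txd, skb_shinfo(skb)->gso_size);	/* TCP LSO, v4 or v6 */
	else if (gso & SKB_GSO_UDP)
		setup_ufo(txd, skb_shinfo(skb)->gso_size);	/* UDP fragmentation offload */
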
@@ -315,9 +315,10 @@ struct net_device
 #define NETIF_F_GSO_SHIFT	16
 #define NETIF_F_GSO_MASK	0xffff0000
 #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 #define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN		(SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)

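The new NETIF_F_TSO6 bit follows the same rule as the other GSO feature bits: each one is the matching SKB_GSO_* value shifted into the upper half of dev->features, so "can this device segment this gso_type?" is a shift-and-mask test, which is what the kernel's net_gso_ok() helper encodes. A small self-contained sketch of that bit layout; the *_SKETCH names mirror the real macros but are placeholders:

	/* Standalone sketch of the NETIF_F_GSO_SHIFT bit layout; the names
	 * below mirror the real NETIF_F_ and SKB_GSO_ macros. */
	#define GSO_SHIFT_SKETCH	16		/* NETIF_F_GSO_SHIFT */
	#define GSO_TCPV4_SKETCH	(1 << 0)	/* SKB_GSO_TCPV4     */
	#define GSO_TCPV6_SKETCH	(1 << 4)	/* SKB_GSO_TCPV6     */
	#define TSO_SKETCH	(GSO_TCPV4_SKETCH << GSO_SHIFT_SKETCH)	/* NETIF_F_TSO  */
	#define TSO6_SKETCH	(GSO_TCPV6_SKETCH << GSO_SHIFT_SKETCH)	/* NETIF_F_TSO6 */

	/* True when the device features include every bit needed for gso_type. */
	static int gso_ok_sketch(unsigned int features, unsigned int gso_type)
	{
		unsigned int needed = gso_type << GSO_SHIFT_SKETCH;

		return (features & needed) == needed;
	}

With this layout, gso_ok_sketch(features, GSO_TCPV6_SKETCH) is true exactly when the TSO6 bit is present in the feature word.
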
@@ -171,13 +171,15 @@ enum {
 
 enum {
 	SKB_GSO_TCPV4 = 1 << 0,
-	SKB_GSO_UDPV4 = 1 << 1,
+	SKB_GSO_UDP = 1 << 1,
 
 	/* This indicates the skb is from an untrusted source. */
 	SKB_GSO_DODGY = 1 << 2,
 
 	/* This indicates the tcp segment has CWR set. */
-	SKB_GSO_TCPV4_ECN = 1 << 3,
+	SKB_GSO_TCP_ECN = 1 << 3,
+
+	SKB_GSO_TCPV6 = 1 << 4,
 };
 
 /**

@@ -146,7 +146,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
 	struct rt6_info *rt = (struct rt6_info *) dst;
 
 	write_lock(&sk->sk_dst_lock);
-	__sk_dst_set(sk, dst);
+	sk_setup_caps(sk, dst);
 	np->daddr_cache = daddr;
 	np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 	write_unlock(&sk->sk_dst_lock);

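Switching ip6_dst_store() from __sk_dst_set() to sk_setup_caps() is what allows the IPv6 callers further down to drop their hand-rolled sk_route_caps masking: the helper both stores the route and derives the socket's offload capabilities from the device features, keeping segmentation offload only when it matches the socket's sk_gso_type. A rough, hedged sketch of that responsibility; the real implementation lives in the core socket code and may differ in detail:

	/* Rough sketch of what sk_setup_caps(sk, dst) takes care of;
	 * not the literal kernel source. */
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (!net_gso_ok(sk->sk_route_caps, sk->sk_gso_type) || dst->header_len)
		/* The device cannot segment this socket's GSO type (or the
		 * route adds headers it cannot account for): fall back to
		 * software by clearing the GSO feature bits. */
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
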
@@ -55,9 +55,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 		if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
 			tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 			skb->h.th->cwr = 1;
-			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-				skb_shinfo(skb)->gso_type |=
-					SKB_GSO_TCPV4_ECN;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 		}
 	} else {
 		/* ACK or retransmitted segment: clear ECT|CE */

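With the ECN flag now protocol-neutral (SKB_GSO_TCP_ECN rather than SKB_GSO_TCPV4_ECN), the old guard on SKB_GSO_TCPV4 is no longer needed: the bit can be OR'd into gso_type for both IPv4 and IPv6 segments, and GSO metadata is only consulted for packets that actually carry a non-zero gso_size, so stamping it unconditionally on a CWR-carrying segment is harmless.
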
@@ -744,7 +744,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
 	if (!err) {
 		/* specify the length of each IP datagram fragment*/
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
 
 		return 0;
@@ -1089,7 +1089,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	if ((sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 	}

@@ -660,8 +660,6 @@ int inet6_sk_rebuild_header(struct sock *sk)
 		}
 
 		ip6_dst_store(sk, dst, NULL);
-		sk->sk_route_caps = dst->dev->features &
-			~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 	}
 
 	return 0;

@@ -187,8 +187,6 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 		}
 
 		ip6_dst_store(sk, dst, NULL);
-		sk->sk_route_caps = dst->dev->features &
-			~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 	}
 
 	skb->dst = dst_clone(dst);

@@ -230,7 +230,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	skb->priority = sk->sk_priority;
 
 	mtu = dst_mtu(dst);
-	if ((skb->len <= mtu) || ipfragok) {
+	if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
 		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
 				dst_output);
@@ -835,7 +835,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		/* specify the length of each IP datagram fragment*/
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
 					    sizeof(struct frag_hdr);
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDPV4;
+		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 		ipv6_select_ident(skb, &fhdr);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);

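The extra skb_shinfo(skb)->gso_size test added to ip6_xmit() above is what lets a GSO super-packet, which deliberately exceeds the path MTU, pass the dst_mtu() check: segmentation happens later, either in the device (TSO/UFO) or in the software GSO fallback. As a rough worked example, a super-packet carrying payload bytes of TCP data with gso_size equal to the MSS becomes about DIV_ROUND_UP(payload, gso_size) wire segments, so 64 KB of payload at a 1440-byte MSS turns into 46 segments.
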
@@ -270,9 +270,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		ipv6_addr_copy(&np->saddr, saddr);
 		inet->rcv_saddr = LOOPBACK4_IPV6;
 
+	sk->sk_gso_type = SKB_GSO_TCPV6;
 	ip6_dst_store(sk, dst, NULL);
-	sk->sk_route_caps = dst->dev->features &
-		~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt)
@@ -930,9 +929,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	 * comment in that function for the gory details. -acme
 	 */
 
+	newsk->sk_gso_type = SKB_GSO_TCPV6;
 	ip6_dst_store(newsk, dst, NULL);
-	newsk->sk_route_caps = dst->dev->features &
-		~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

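Setting sk_gso_type to SKB_GSO_TCPV6 on the IPv6 socket is the piece that ties the rest together: when TCP later builds a super-packet it stamps the socket's GSO type into the skb, and sk_setup_caps() compares the same value against the device features to decide whether hardware segmentation is allowed. A hedged sketch of the stamping step, in the spirit of tcp_set_skb_tso_segs() rather than a copy of it:

	/* Sketch: how TCP records GSO metadata on an outgoing skb. */
	if (skb->len <= mss_now) {
		/* Fits in one segment: no GSO metadata needed. */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		/* Super-packet: record the segment size, the segment count,
		 * and the socket's GSO type (SKB_GSO_TCPV4 or SKB_GSO_TCPV6). */
		skb_shinfo(skb)->gso_segs = (skb->len + mss_now - 1) / mss_now;
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}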