Commit 936d7de3 authored by Roland Dreier, committed by David S. Miller

IPoIB: Stop lying about hard_header_len and use skb->cb to stash LL addresses

Commit a0417fa3 ("net: Make qdisc_skb_cb upper size bound
explicit.") made it possible for a netdev driver to use skb->cb
between its header_ops.create method and its .ndo_start_xmit
method.  Use this in ipoib_hard_header() to stash away the LL address
(GID + QPN), instead of the "ipoib_pseudoheader" hack.  This allows
IPoIB to stop lying about its hard_header_len, which will let us fix
the L2 check for GRO.
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a1728800
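
Before the diff, a minimal sketch of the skb->cb technique the commit message relies on may help. Everything in it is illustrative: the names example_cb, EXAMPLE_ALEN, example_stash_daddr() and example_get_daddr() are hypothetical stand-ins, not symbols from this patch. The point is simply that a driver-private control block laid over skb->cb can begin with struct qdisc_skb_cb, leaving the rest of the 48-byte cb area free to carry a destination address from header_ops.create to .ndo_start_xmit.

/*
 * Illustrative sketch only -- not the IPoIB source.  A control block
 * whose first member is struct qdisc_skb_cb, so the qdisc layer's own
 * use of skb->cb does not clobber the link-layer address stashed
 * between header_ops.create and .ndo_start_xmit.
 */
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/sch_generic.h>

#define EXAMPLE_ALEN 20         /* stand-in for INFINIBAND_ALEN (GID + QPN) */

struct example_cb {
        struct qdisc_skb_cb qdisc_cb;   /* must stay first: shared with the qdisc layer */
        u8 hwaddr[EXAMPLE_ALEN];        /* stashed destination LL address */
};

/* header_ops.create side: stash the destination instead of pushing a pseudoheader */
static void example_stash_daddr(struct sk_buff *skb, const void *daddr)
{
        struct example_cb *cb = (struct example_cb *) skb->cb;

        BUILD_BUG_ON(sizeof(struct example_cb) > sizeof(skb->cb));
        memcpy(cb->hwaddr, daddr, EXAMPLE_ALEN);
}

/* .ndo_start_xmit side: read the address back without touching skb->data */
static const u8 *example_get_daddr(const struct sk_buff *skb)
{
        return ((const struct example_cb *) skb->cb)->hwaddr;
}

The hunks below apply exactly this idea: ipoib_hard_header() fills cb->hwaddr when the skb has no dst entry, ipoib_start_xmit() reads it back, and the old practice of pushing an ipoib_pseudoheader in front of skb->data (and inflating hard_header_len to make room for it) goes away.
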
@@ -44,6 +44,7 @@
 #include <linux/mutex.h>
 #include <net/neighbour.h>
+#include <net/sch_generic.h>
 #include <linux/atomic.h>
@@ -117,7 +118,8 @@ struct ipoib_header {
         u16 reserved;
 };
-struct ipoib_pseudoheader {
+struct ipoib_cb {
+        struct qdisc_skb_cb qdisc_cb;
         u8 hwaddr[INFINIBAND_ALEN];
 };
...
@@ -653,7 +653,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct n
 }
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
-                             struct ipoib_pseudoheader *phdr)
+                             struct ipoib_cb *cb)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_path *path;
@@ -661,17 +661,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
         spin_lock_irqsave(&priv->lock, flags);
-        path = __path_find(dev, phdr->hwaddr + 4);
+        path = __path_find(dev, cb->hwaddr + 4);
         if (!path || !path->valid) {
                 int new_path = 0;
                 if (!path) {
-                        path = path_rec_create(dev, phdr->hwaddr + 4);
+                        path = path_rec_create(dev, cb->hwaddr + 4);
                         new_path = 1;
                 }
                 if (path) {
-                        /* put pseudoheader back on for next time */
-                        skb_push(skb, sizeof *phdr);
                         __skb_queue_tail(&path->queue, skb);
                         if (!path->query && path_rec_start(dev, path)) {
@@ -695,12 +693,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                           be16_to_cpu(path->pathrec.dlid));
                 spin_unlock_irqrestore(&priv->lock, flags);
-                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+                ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
                 return;
         } else if ((path->query || !path_rec_start(dev, path)) &&
                    skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-                /* put pseudoheader back on for next time */
-                skb_push(skb, sizeof *phdr);
                 __skb_queue_tail(&path->queue, skb);
         } else {
                 ++dev->stats.tx_dropped;
@@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         dev_kfree_skb_any(skb);
                 }
         } else {
-                struct ipoib_pseudoheader *phdr =
-                        (struct ipoib_pseudoheader *) skb->data;
-                skb_pull(skb, sizeof *phdr);
+                struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
-                if (phdr->hwaddr[4] == 0xff) {
+                if (cb->hwaddr[4] == 0xff) {
                         /* Add in the P_Key for multicast*/
-                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
-                        phdr->hwaddr[9] = priv->pkey & 0xff;
-                        ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
+                        cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+                        cb->hwaddr[9] = priv->pkey & 0xff;
+                        ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
                 } else {
                         /* unicast GID -- should be ARP or RARP reply */
@@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
                                            skb_dst(skb) ? "neigh" : "dst",
                                            be16_to_cpup((__be16 *) skb->data),
-                                           IPOIB_QPN(phdr->hwaddr),
-                                           phdr->hwaddr + 4);
+                                           IPOIB_QPN(cb->hwaddr),
+                                           cb->hwaddr + 4);
                                 dev_kfree_skb_any(skb);
                                 ++dev->stats.tx_dropped;
                                 goto unlock;
                         }
-                        unicast_arp_send(skb, dev, phdr);
+                        unicast_arp_send(skb, dev, cb);
                 }
         }
 unlock:
@@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
                              const void *daddr, const void *saddr, unsigned len)
 {
         struct ipoib_header *header;
-        struct dst_entry *dst;
-        struct neighbour *n;
         header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
         header->reserved = 0;
         /*
-         * If we don't have a neighbour structure, stuff the
-         * destination address onto the front of the skb so we can
-         * figure out where to send the packet later.
+         * If we don't have a dst_entry structure, stuff the
+         * destination address into skb->cb so we can figure out where
+         * to send the packet later.
          */
-        dst = skb_dst(skb);
-        n = NULL;
-        if (dst)
-                n = dst_get_neighbour_noref_raw(dst);
-        if ((!dst || !n) && daddr) {
-                struct ipoib_pseudoheader *phdr =
-                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
-                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+        if (!skb_dst(skb)) {
+                struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+                memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
         }
         return 0;
@@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
         dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
-        /*
-         * We add in INFINIBAND_ALEN to allow for the destination
-         * address "pseudoheader" for skbs without neighbour struct.
-         */
-        dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+        dev->hard_header_len = IPOIB_ENCAP_LEN;
         dev->addr_len = INFINIBAND_ALEN;
         dev->type = ARPHRD_INFINIBAND;
         dev->tx_queue_len = ipoib_sendq_size * 2;
...
@@ -262,21 +262,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
         netif_tx_lock_bh(dev);
         while (!skb_queue_empty(&mcast->pkt_queue)) {
                 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-                struct dst_entry *dst = skb_dst(skb);
-                struct neighbour *n = NULL;
                 netif_tx_unlock_bh(dev);
                 skb->dev = dev;
-                if (dst)
-                        n = dst_get_neighbour_noref_raw(dst);
-                if (!dst || !n) {
-                        /* put pseudoheader back on for next time */
-                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
-                }
                 if (dev_queue_xmit(skb))
                         ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
                 netif_tx_lock_bh(dev);
         }
         netif_tx_unlock_bh(dev);
...