Commit fedbc8c1 authored by Paul Durrant, committed by David S. Miller

xen-netback: retire guest rx side prefix GSO feature

As far as I am aware, only very old Windows network frontends make use
of this style of passing GSO packets from backend to frontend. These
frontends can easily be replaced by the freely available Xen Project
Windows PV network frontend, which uses the 'default' mechanism for
passing GSO packets - the same mechanism used by all Linux frontends.
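
For context, the 'default' mechanism carries the GSO metadata in a
XEN_NETIF_EXTRA_TYPE_GSO extra-info slot on the rx ring rather than in
a dedicated prefix descriptor. The following is only an illustrative
sketch of filling such a slot, using the public definitions from
xen/interface/io/netif.h; the helper name is hypothetical and this is
not the driver's actual code:

    #include <xen/interface/io/netif.h>

    /* Hypothetical helper: describe a GSO packet via an extra-info slot. */
    static void fill_gso_extra(struct xen_netif_extra_info *extra,
                               uint16_t gso_size, uint8_t gso_type)
    {
            extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
            extra->flags = 0;              /* no XEN_NETIF_EXTRA_FLAG_MORE */
            extra->u.gso.size = gso_size;  /* MSS the frontend should use */
            extra->u.gso.type = gso_type;  /* e.g. XEN_NETIF_GSO_TYPE_TCPV4 */
            extra->u.gso.pad = 0;
            extra->u.gso.features = 0;
    }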

NOTE: Removal of this feature will not cause breakage in old Windows
      frontends. They simply will no longer receive GSO packets - the
      backend will instead segment the packets in software.
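
With NETIF_F_TSO/NETIF_F_TSO6 cleared for such a vif (see the
xenvif_fix_features() hunk below), the core network stack segments
large packets in software before they reach the device. Conceptually,
and only as a sketch of that fallback (the wrapper name is
hypothetical):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch only: software segmentation as performed by the core stack
     * when the device lacks the relevant NETIF_F_TSO* bits. */
    static struct sk_buff *segment_for_frontend(struct sk_buff *skb,
                                                netdev_features_t features)
    {
            /* Returns a list of MTU-sized skbs built from one GSO skb. */
            return skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
    }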
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3254f836
@@ -260,7 +260,6 @@ struct xenvif {
 	/* Frontend feature information. */
 	int gso_mask;
-	int gso_prefix_mask;
 	u8 can_sg:1;
 	u8 ip_csum:1;
@@ -319,9 +319,9 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
 		features &= ~NETIF_F_TSO6;
 	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
@@ -347,16 +347,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
 	}
 
-	/* Set up a GSO prefix descriptor, if necessary */
-	if ((1 << gso_type) & vif->gso_prefix_mask) {
-		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-		meta = npo->meta + npo->meta_prod++;
-		meta->gso_type = gso_type;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-		meta->size = 0;
-		meta->id = req.id;
-	}
-
 	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 	meta = npo->meta + npo->meta_prod++;
@@ -511,22 +501,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		struct xen_netif_extra_info *extra = NULL;
 
-		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    vif->gso_prefix_mask) {
-			resp = RING_GET_RESPONSE(&queue->rx,
-						 queue->rx.rsp_prod_pvt++);
-
-			resp->flags = XEN_NETRXF_gso_prefix |
-				      XEN_NETRXF_more_data;
-
-			resp->offset = queue->meta[npo.meta_cons].gso_size;
-			resp->id = queue->meta[npo.meta_cons].id;
-			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
-
-			npo.meta_cons++;
-			XENVIF_RX_CB(skb)->meta_slots_used--;
-		}
-
 		queue->stats.tx_bytes += skb->len;
 		queue->stats.tx_packets++;
@@ -1135,7 +1135,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	vif->can_sg = !!val;
 
 	vif->gso_mask = 0;
-	vif->gso_prefix_mask = 0;
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
@@ -1143,32 +1142,12 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV4);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
 			 "%d", &val) < 0)
 		val = 0;
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV6);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
-
-	if (vif->gso_mask & vif->gso_prefix_mask) {
-		xenbus_dev_fatal(dev, err,
-				 "%s: gso and gso prefix flags are not "
-				 "mutually exclusive",
-				 dev->otherend);
-		return -EOPNOTSUPP;
-	}
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
 		val = 0;