Commit eb76a9f1 authored by David S. Miller

Merge branch 'xen-netback-rx-refactor'

Paul Durrant says:

====================
xen-netback: guest rx side refactor

This series refactors the guest rx side of xen-netback:

- The code is moved into its own source module.

- The prefix variant of GSO handling is retired (since it is no longer
  in common use, and alternatives exist).

- The code is then simplified and modifications made to improve
  performance.

v2:
- Rebased onto refreshed net-next
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 00c06ed7 2167ca02
drivers/net/xen-netback/Makefile
 obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
-xen-netback-y := netback.o xenbus.o interface.o hash.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o
drivers/net/xen-netback/common.h
@@ -91,13 +91,6 @@ struct xenvif_rx_meta {
  */
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
 #define NETBACK_INVALID_HANDLE -1
 
 /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
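For scale, a rough sizing note (assuming 4 KiB pages and the usual 256-slot RX ring; the OLD_/NEW_ names are illustrative, not from the series):

/* Worst case preallocated per queue by the old code, vs. the fixed
 * batch introduced below; assumes XEN_PAGE_SIZE == 4096 and a
 * 256-entry RX ring.
 */
#define MAX_XEN_SKB_FRAGS	(65536 / 4096 + 1)		/* = 17 */
#define OLD_GRANT_COPY_OPS	(MAX_XEN_SKB_FRAGS * 256)	/* = 4352 ops */
#define NEW_GRANT_COPY_OPS	64				/* COPY_BATCH_SIZE */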
@@ -133,6 +126,15 @@ struct xenvif_stats {
 	unsigned long tx_frag_overflow;
 };
 
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+	struct gnttab_copy op[COPY_BATCH_SIZE];
+	RING_IDX idx[COPY_BATCH_SIZE];
+	unsigned int num;
+	struct sk_buff_head *completed;
+};
+
 struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int id; /* Queue ID, 0-based */
 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
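The new xenvif_copy_state accumulates grant copies in op[] (with the matching ring indices in idx[]) and issues them together once num fills up or an RX pass completes, rather than preallocating for the worst case. The real batching code is in the collapsed rx.c diff; what follows is only a plausible sketch of the flush step, assuming the existing gnttab_batch_copy() helper, with the function name taken as illustrative:

#include <xen/grant_table.h>

/* Sketch of a flush: issue all queued grant copies in one batch, then
 * propagate any per-op failure into the ring response recorded at the
 * matching idx[] slot.
 */
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op = &queue->rx_copy.op[i];

		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;
}

The completed list presumably exists so that skbs dequeued during a pass are freed only once the copies referencing them have been issued.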
@@ -189,12 +191,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long last_rx_time;
 	bool stalled;
 
-	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
-	/* We create one meta structure per ring request we consume, so
-	 * the maximum number is the same as the ring size.
-	 */
-	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+	struct xenvif_copy_state rx_copy;
 
 	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
 	unsigned long credit_bytes;
@@ -260,7 +257,6 @@ struct xenvif {
 	/* Frontend feature information. */
 	int gso_mask;
-	int gso_prefix_mask;
 
 	u8 can_sg:1;
 	u8 ip_csum:1;
@@ -359,6 +355,7 @@ int xenvif_dealloc_kthread(void *data);
 
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
...
drivers/net/xen-netback/interface.c
@@ -319,9 +319,9 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 	if (!vif->can_sg)
 		features &= ~NETIF_F_SG;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
 		features &= ~NETIF_F_TSO;
-	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
 		features &= ~NETIF_F_TSO6;
 	if (!vif->ip_csum)
 		features &= ~NETIF_F_IP_CSUM;
@@ -467,7 +467,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_TSO | NETIF_F_TSO6;
+		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
 	dev->ethtool_ops = &xenvif_ethtool_ops;
...
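Adding NETIF_F_FRAGLIST means the stack may now hand the backend skbs whose frag_list chains further skbs, so the rx path has to walk the chain as well as the paged frags. A minimal sketch of such a walk (the helper name is hypothetical; the real handling is in the collapsed rx.c diff):

#include <linux/skbuff.h>

/* Count the skbs making up a fraglist chain: the head skb plus every
 * skb linked via skb_shinfo(skb)->frag_list.
 */
static unsigned int count_fraglist_skbs(struct sk_buff *skb)
{
	struct sk_buff *iter;
	unsigned int n = 1;	/* the head skb itself */

	skb_walk_frags(skb, iter)
		n++;

	return n;
}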
drivers/net/xen-netback/netback.c: diff collapsed
drivers/net/xen-netback/rx.c: diff collapsed
drivers/net/xen-netback/xenbus.c
@@ -1135,7 +1135,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	vif->can_sg = !!val;
 	vif->gso_mask = 0;
-	vif->gso_prefix_mask = 0;
 
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
 			 "%d", &val) < 0)
@@ -1143,32 +1142,12 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV4);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV4);
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
 			 "%d", &val) < 0)
 		val = 0;
 	if (val)
 		vif->gso_mask |= GSO_BIT(TCPV6);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
-		vif->gso_prefix_mask |= GSO_BIT(TCPV6);
-
-	if (vif->gso_mask & vif->gso_prefix_mask) {
-		xenbus_dev_fatal(dev, err,
-				 "%s: gso and gso prefix flags are not "
-				 "mutually exclusive",
-				 dev->otherend);
-		return -EOPNOTSUPP;
-	}
-
 	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
 			 "%d", &val) < 0)
 		val = 0;
...