Commit e1f00a69 authored by Wei Liu, committed by David S. Miller

xen-netback: split event channels support for Xen backend driver

Netback and netfront only use one event channel to do TX / RX notification,
which may cause unnecessary wake-up of processing routines. This patch adds a
new feature called feature-split-event-channels to netback, enabling it to
handle TX and RX events separately.

Netback will use tx_irq to notify guest for TX completion, rx_irq for RX
notification.

If the frontend doesn't support this feature, tx_irq equals rx_irq.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 88924753
...@@ -57,8 +57,12 @@ struct xenvif { ...@@ -57,8 +57,12 @@ struct xenvif {
u8 fe_dev_addr[6]; u8 fe_dev_addr[6];
/* Physical parameters of the comms window. */ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int irq; unsigned int tx_irq;
unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */
char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
/* List of frontends to notify after a batch of frames sent. */ /* List of frontends to notify after a batch of frames sent. */
struct list_head notify_list; struct list_head notify_list;
...@@ -113,7 +117,8 @@ struct xenvif *xenvif_alloc(struct device *parent, ...@@ -113,7 +117,8 @@ struct xenvif *xenvif_alloc(struct device *parent,
unsigned int handle); unsigned int handle);
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int evtchn); unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif); void xenvif_disconnect(struct xenvif *vif);
void xenvif_get(struct xenvif *vif); void xenvif_get(struct xenvif *vif);
...@@ -158,4 +163,6 @@ void xenvif_carrier_off(struct xenvif *vif); ...@@ -158,4 +163,6 @@ void xenvif_carrier_off(struct xenvif *vif);
/* Returns number of ring slots required to send an skb to the frontend */ /* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
extern bool separate_tx_rx_irq;
#endif /* __XEN_NETBACK__COMMON_H__ */ #endif /* __XEN_NETBACK__COMMON_H__ */
...@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif) ...@@ -60,21 +60,39 @@ static int xenvif_rx_schedulable(struct xenvif *vif)
return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif); return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
} }
static irqreturn_t xenvif_interrupt(int irq, void *dev_id) static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{ {
struct xenvif *vif = dev_id; struct xenvif *vif = dev_id;
if (vif->netbk == NULL) if (vif->netbk == NULL)
return IRQ_NONE; return IRQ_HANDLED;
xen_netbk_schedule_xenvif(vif); xen_netbk_schedule_xenvif(vif);
return IRQ_HANDLED;
}
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
if (vif->netbk == NULL)
return IRQ_HANDLED;
if (xenvif_rx_schedulable(vif)) if (xenvif_rx_schedulable(vif))
netif_wake_queue(vif->dev); netif_wake_queue(vif->dev);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
xenvif_tx_interrupt(irq, dev_id);
xenvif_rx_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct xenvif *vif = netdev_priv(dev); struct xenvif *vif = netdev_priv(dev);
...@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) ...@@ -125,13 +143,17 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
static void xenvif_up(struct xenvif *vif) static void xenvif_up(struct xenvif *vif)
{ {
xen_netbk_add_xenvif(vif); xen_netbk_add_xenvif(vif);
enable_irq(vif->irq); enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
xen_netbk_check_rx_xenvif(vif); xen_netbk_check_rx_xenvif(vif);
} }
static void xenvif_down(struct xenvif *vif) static void xenvif_down(struct xenvif *vif)
{ {
disable_irq(vif->irq); disable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
disable_irq(vif->rx_irq);
del_timer_sync(&vif->credit_timeout); del_timer_sync(&vif->credit_timeout);
xen_netbk_deschedule_xenvif(vif); xen_netbk_deschedule_xenvif(vif);
xen_netbk_remove_xenvif(vif); xen_netbk_remove_xenvif(vif);
...@@ -308,12 +330,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, ...@@ -308,12 +330,13 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
} }
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int evtchn) unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{ {
int err = -ENOMEM; int err = -ENOMEM;
/* Already connected through? */ /* Already connected through? */
if (vif->irq) if (vif->tx_irq)
return 0; return 0;
__module_get(THIS_MODULE); __module_get(THIS_MODULE);
...@@ -322,13 +345,37 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, ...@@ -322,13 +345,37 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (err < 0) if (err < 0)
goto err; goto err;
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler( err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, evtchn, xenvif_interrupt, 0, vif->domid, tx_evtchn, xenvif_interrupt, 0,
vif->dev->name, vif); vif->dev->name, vif);
if (err < 0) if (err < 0)
goto err_unmap; goto err_unmap;
vif->irq = err; vif->tx_irq = vif->rx_irq = err;
disable_irq(vif->irq); disable_irq(vif->tx_irq);
} else {
/* feature-split-event-channels == 1 */
snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
"%s-tx", vif->dev->name);
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
vif->tx_irq_name, vif);
if (err < 0)
goto err_unmap;
vif->tx_irq = err;
disable_irq(vif->tx_irq);
snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
"%s-rx", vif->dev->name);
err = bind_interdomain_evtchn_to_irqhandler(
vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
vif->rx_irq_name, vif);
if (err < 0)
goto err_tx_unbind;
vif->rx_irq = err;
disable_irq(vif->rx_irq);
}
xenvif_get(vif); xenvif_get(vif);
...@@ -342,6 +389,9 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, ...@@ -342,6 +389,9 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
rtnl_unlock(); rtnl_unlock();
return 0; return 0;
err_tx_unbind:
unbind_from_irqhandler(vif->tx_irq, vif);
vif->tx_irq = 0;
err_unmap: err_unmap:
xen_netbk_unmap_frontend_rings(vif); xen_netbk_unmap_frontend_rings(vif);
err: err:
...@@ -375,8 +425,13 @@ void xenvif_disconnect(struct xenvif *vif) ...@@ -375,8 +425,13 @@ void xenvif_disconnect(struct xenvif *vif)
atomic_dec(&vif->refcnt); atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
if (vif->irq) { if (vif->tx_irq) {
unbind_from_irqhandler(vif->irq, vif); if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
else {
unbind_from_irqhandler(vif->tx_irq, vif);
unbind_from_irqhandler(vif->rx_irq, vif);
}
/* vif->irq is valid, we had a module_get in /* vif->irq is valid, we had a module_get in
* xenvif_connect. * xenvif_connect.
*/ */
......
...@@ -47,6 +47,13 @@ ...@@ -47,6 +47,13 @@
#include <asm/xen/hypercall.h> #include <asm/xen/hypercall.h>
#include <asm/xen/page.h> #include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
* event channels are limited resource. Split event channels are
* enabled by default.
*/
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
/* /*
* This is the maximum slots a skb can have. If a guest sends a skb * This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious. * which exceeds this limit it is considered malicious.
...@@ -662,7 +669,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) ...@@ -662,7 +669,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
{ {
struct xenvif *vif = NULL, *tmp; struct xenvif *vif = NULL, *tmp;
s8 status; s8 status;
u16 irq, flags; u16 flags;
struct xen_netif_rx_response *resp; struct xen_netif_rx_response *resp;
struct sk_buff_head rxq; struct sk_buff_head rxq;
struct sk_buff *skb; struct sk_buff *skb;
...@@ -771,7 +778,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) ...@@ -771,7 +778,6 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
sco->meta_slots_used); sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
irq = vif->irq;
if (ret && list_empty(&vif->notify_list)) if (ret && list_empty(&vif->notify_list))
list_add_tail(&vif->notify_list, &notify); list_add_tail(&vif->notify_list, &notify);
...@@ -783,7 +789,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) ...@@ -783,7 +789,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
} }
list_for_each_entry_safe(vif, tmp, &notify, notify_list) { list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
notify_remote_via_irq(vif->irq); notify_remote_via_irq(vif->rx_irq);
list_del_init(&vif->notify_list); list_del_init(&vif->notify_list);
} }
...@@ -1762,7 +1768,7 @@ static void make_tx_response(struct xenvif *vif, ...@@ -1762,7 +1768,7 @@ static void make_tx_response(struct xenvif *vif,
vif->tx.rsp_prod_pvt = ++i; vif->tx.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
if (notify) if (notify)
notify_remote_via_irq(vif->irq); notify_remote_via_irq(vif->tx_irq);
} }
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
......
...@@ -122,6 +122,16 @@ static int netback_probe(struct xenbus_device *dev, ...@@ -122,6 +122,16 @@ static int netback_probe(struct xenbus_device *dev,
goto fail; goto fail;
} }
/*
* Split event channels support, this is optional so it is not
* put inside the above loop.
*/
err = xenbus_printf(XBT_NIL, dev->nodename,
"feature-split-event-channels",
"%u", separate_tx_rx_irq);
if (err)
pr_debug("Error writing feature-split-event-channels");
err = xenbus_switch_state(dev, XenbusStateInitWait); err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err) if (err)
goto fail; goto fail;
...@@ -393,21 +403,36 @@ static int connect_rings(struct backend_info *be) ...@@ -393,21 +403,36 @@ static int connect_rings(struct backend_info *be)
struct xenvif *vif = be->vif; struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev; struct xenbus_device *dev = be->dev;
unsigned long tx_ring_ref, rx_ring_ref; unsigned long tx_ring_ref, rx_ring_ref;
unsigned int evtchn, rx_copy; unsigned int tx_evtchn, rx_evtchn, rx_copy;
int err; int err;
int val; int val;
err = xenbus_gather(XBT_NIL, dev->otherend, err = xenbus_gather(XBT_NIL, dev->otherend,
"tx-ring-ref", "%lu", &tx_ring_ref, "tx-ring-ref", "%lu", &tx_ring_ref,
"rx-ring-ref", "%lu", &rx_ring_ref, "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
"event-channel", "%u", &evtchn, NULL);
if (err) { if (err) {
xenbus_dev_fatal(dev, err, xenbus_dev_fatal(dev, err,
"reading %s/ring-ref and event-channel", "reading %s/ring-ref",
dev->otherend); dev->otherend);
return err; return err;
} }
/* Try split event channels first, then single event channel. */
err = xenbus_gather(XBT_NIL, dev->otherend,
"event-channel-tx", "%u", &tx_evtchn,
"event-channel-rx", "%u", &rx_evtchn, NULL);
if (err < 0) {
err = xenbus_scanf(XBT_NIL, dev->otherend,
"event-channel", "%u", &tx_evtchn);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel(-tx/rx)",
dev->otherend);
return err;
}
rx_evtchn = tx_evtchn;
}
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy); &rx_copy);
if (err == -ENOENT) { if (err == -ENOENT) {
...@@ -454,11 +479,13 @@ static int connect_rings(struct backend_info *be) ...@@ -454,11 +479,13 @@ static int connect_rings(struct backend_info *be)
vif->csum = !val; vif->csum = !val;
/* Map the shared frame, irq etc. */ /* Map the shared frame, irq etc. */
err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn); err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
if (err) { if (err) {
xenbus_dev_fatal(dev, err, xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port %u", "mapping shared-frames %lu/%lu port tx %u rx %u",
tx_ring_ref, rx_ring_ref, evtchn); tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
return err; return err;
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment