Commit cf66f9d4 authored by Konrad Rzeszutek Wilk, committed by David S. Miller

xen/netfront: add netconsole support.

Add a polling interface to the xen-netfront device to support netconsole.

This patch also alters the spin_lock usage to use the irqsave variant.
Documentation/networking/netdevices.txt states that start_xmit can be
called with interrupts disabled by netconsole, so using irqsave/restore
locking in this function looks correct.
Signed-off-by: Tina.Yang <tina.yang@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Zhenzhong.Duan <zhenzhong.duan@oracle.com>
Tested-by: gurudas.pai <gurudas.pai@oracle.com>
[v1: Copy-n-pasted Ian Campbell comments]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c54a4570
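For context, a minimal sketch (not part of the patch, with hypothetical function names) of why the irqsave variant matters here: spin_unlock_irq() unconditionally re-enables local interrupts, which would be wrong when the caller, such as netconsole's netpoll path, invoked start_xmit with interrupts already disabled. The irqsave/irqrestore pair saves and restores the caller's interrupt state instead:

/* Illustrative only, not from the patch: contrast of the two locking styles. */
static void broken_with_netpoll(spinlock_t *lock)
{
	spin_lock_irq(lock);		/* disables local IRQs unconditionally */
	/* ... push the skb onto the shared tx ring ... */
	spin_unlock_irq(lock);		/* BUG: re-enables IRQs even if the
					 * caller entered with them disabled */
}

static void safe_with_netpoll(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* saves the current IRQ state */
	/* ... push the skb onto the shared tx ring ... */
	spin_unlock_irqrestore(lock, flags);	/* restores the saved state */
}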
@@ -489,6 +489,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int frags = skb_shinfo(skb)->nr_frags;
 	unsigned int offset = offset_in_page(data);
 	unsigned int len = skb_headlen(skb);
+	unsigned long flags;
 
 	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
 	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
@@ -498,12 +499,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	spin_lock_irq(&np->tx_lock);
+	spin_lock_irqsave(&np->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
-		spin_unlock_irq(&np->tx_lock);
+		spin_unlock_irqrestore(&np->tx_lock, flags);
 		goto drop;
 	}
@@ -574,7 +575,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!netfront_tx_slot_available(np))
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&np->tx_lock);
+	spin_unlock_irqrestore(&np->tx_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -1228,6 +1229,33 @@ static int xennet_set_features(struct net_device *dev,
 	return 0;
 }
 
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct netfront_info *np = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&np->tx_lock, flags);
+
+	if (likely(netif_carrier_ok(dev))) {
+		xennet_tx_buf_gc(dev);
+		/* Under tx_lock: protects access to rx shared-ring indexes. */
+		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+			napi_schedule(&np->napi);
+	}
+
+	spin_unlock_irqrestore(&np->tx_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xennet_poll_controller(struct net_device *dev)
+{
+	xennet_interrupt(0, dev);
+}
+#endif
+
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open = xennet_open,
 	.ndo_uninit = xennet_uninit,
@@ -1239,6 +1267,9 @@ static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_fix_features = xennet_fix_features,
 	.ndo_set_features = xennet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = xennet_poll_controller,
+#endif
 };
 
 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
@@ -1448,26 +1479,6 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
 	return 0;
 }
 
-static irqreturn_t xennet_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned long flags;
-
-	spin_lock_irqsave(&np->tx_lock, flags);
-
-	if (likely(netif_carrier_ok(dev))) {
-		xennet_tx_buf_gc(dev);
-		/* Under tx_lock: protects access to rx shared-ring indexes. */
-		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
-			napi_schedule(&np->napi);
-	}
-
-	spin_unlock_irqrestore(&np->tx_lock, flags);
-
-	return IRQ_HANDLED;
-}
-
 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 {
 	struct xen_netif_tx_sring *txs;
...
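For reference, a hedged sketch of the call path this hook enables (simplified, and poll_one_device() is a hypothetical name; exact netpoll internals vary by kernel version): when netconsole must emit a message with interrupts off, the netpoll core drives the device's interrupt handler by hand through .ndo_poll_controller, so completed tx slots are reclaimed and the queue can make progress without waiting for a real interrupt:

/*
 * Hypothetical sketch of the generic netpoll pattern; not code from
 * this patch. netpoll invokes the driver's poll controller, which for
 * xen-netfront ends up in xennet_interrupt(0, dev).
 */
static void poll_one_device(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);	/* -> xennet_poll_controller()
						 *    -> xennet_interrupt(0, dev) */
}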