Commit b6cd27ed authored by Stephen Hemminger, committed by David S. Miller

netpoll per device txq

When the netpoll beast got really busy, it tended to clog
things, so it stored them for later. But the beast was putting
all its skbs in one basket. This was bad because maybe some
pipes were clogged and others were not.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
parent 93ec2c72
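In outline, the patch below replaces one hand-rolled global list (queue_head/queue_tail guarded by queue_lock) with a per-device sk_buff_head plus a per-device work item, so a stalled device no longer blocks skbs queued for other devices. Since each sk_buff_head carries its own spinlock, the shared queue_lock and the queue_depth bookkeeping go away too. A minimal sketch of that pattern, using the era's three-argument INIT_WORK() as seen in the diff; the ex_* struct and function names are illustrative, not part of the patch:

/*
 * Sketch of the per-device deferred-transmit pattern this commit adopts
 * (2.6.x-era three-argument INIT_WORK, as in the diff below). The ex_*
 * names are illustrative; only the queue/work usage mirrors the patch.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct ex_dev {
	struct sk_buff_head txq;	/* this device's backlog; has its own lock */
	struct work_struct tx_work;	/* drains txq from process context */
};

static void ex_tx_work(void *p)
{
	struct ex_dev *ed = p;
	struct sk_buff *skb;

	/* skb_dequeue() takes the queue's internal lock, so no global
	 * spinlock or hand-rolled head/tail pointers are needed. */
	while ((skb = skb_dequeue(&ed->txq)))
		dev_queue_xmit(skb);
}

static void ex_init(struct ex_dev *ed)
{
	skb_queue_head_init(&ed->txq);
	INIT_WORK(&ed->tx_work, ex_tx_work, ed);
}

/* Called when the device is busy: park the skb on *its* queue only. */
static void ex_defer_xmit(struct ex_dev *ed, struct sk_buff *skb)
{
	skb_queue_tail(&ed->txq, skb);
	schedule_work(&ed->tx_work);
}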
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -33,6 +33,8 @@ struct netpoll_info {
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+	struct sk_buff_head txq;
+	struct work_struct tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -38,10 +38,6 @@
 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED	1
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
+	while ((skb = skb_dequeue(&npinfo->txq)))
 		dev_queue_xmit(skb);
-	}
 }
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
-
 void netpoll_queue(struct sk_buff *skb)
 {
-	unsigned long flags;
+	struct net_device *dev = skb->dev;
+	struct netpoll_info *npinfo = dev->npinfo;
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
-	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
+	if (!npinfo)
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_work(&npinfo->tx_work);
+	}
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
 		npinfo = ndev->npinfo;
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
 			np->dev->npinfo = NULL;
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				flush_scheduled_work();
 
 				kfree(npinfo);
 			}