Commit df133f3f authored by Michael S. Tsirkin, committed by David S. Miller

virtio_net: bulk free tx skbs

Use napi_consume_skb() to get bulk free.  Note that napi_consume_skb is
safe to call in a non-napi context as long as the napi_budget flag is
correct.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3e64cf7a
...@@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, ...@@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets; return stats.packets;
} }
static void free_old_xmit_skbs(struct send_queue *sq) static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{ {
struct sk_buff *skb; struct sk_buff *skb;
unsigned int len; unsigned int len;
...@@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) ...@@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len; bytes += skb->len;
packets++; packets++;
dev_consume_skb_any(skb); napi_consume_skb(skb, in_napi);
} }
/* Avoid overhead when no packets have been processed /* Avoid overhead when no packets have been processed
...@@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) ...@@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return; return;
if (__netif_tx_trylock(txq)) { if (__netif_tx_trylock(txq)) {
free_old_xmit_skbs(sq); free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq); __netif_tx_unlock(txq);
} }
...@@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) ...@@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
__netif_tx_lock(txq, raw_smp_processor_id()); __netif_tx_lock(txq, raw_smp_processor_id());
free_old_xmit_skbs(sq); free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq); __netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0); virtqueue_napi_complete(napi, sq->vq, 0);
...@@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight; bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */ /* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq); free_old_xmit_skbs(sq, false);
if (use_napi && kick) if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq); virtqueue_enable_cb_delayed(sq->vq);
...@@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi && if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */ /* More just got used, free them then recheck. */
free_old_xmit_skbs(sq); free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum); netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq); virtqueue_disable_cb(sq->vq);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment