Commit 5a2f966d authored by Michael S. Tsirkin

virtio_net: move tx vq operation under tx queue lock

It's unsafe to operate a vq from multiple threads.
Unfortunately this is exactly what we do when invoking
the tx clean path from rx napi.
The same happens with napi-tx even without the
opportunistic cleaning from the receive interrupt: it races
with processing the vq in start_xmit.

As a fix, move everything that deals with the vq under the tx queue lock.

Fixes: b92f1e67 ("virtio-net: transmit napi")
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 6f5312f8
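
For context, the tail of virtnet_poll_tx() before this patch (reconstructed from the context and removed lines of the diff below) cleaned old tx buffers under the tx queue lock, but completed napi, and with it re-armed virtqueue callbacks, only after dropping that lock:

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq, true);
	__netif_tx_unlock(txq);

	/* Operates on sq->vq with the tx lock already dropped, so this can
	 * race with start_xmit() processing the same vq under the lock. */
	virtqueue_napi_complete(napi, sq->vq, 0);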
@@ -1592,6 +1592,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	int opaque;
+	bool done;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1601,10 +1603,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
+	virtqueue_disable_cb(sq->vq);
 	free_old_xmit_skbs(sq, true);
+
+	opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+	done = napi_complete_done(napi, 0);
+
+	if (!done)
+		virtqueue_disable_cb(sq->vq);
+
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (done) {
+		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+			if (napi_schedule_prep(napi)) {
+				__netif_tx_lock(txq, raw_smp_processor_id());
+				virtqueue_disable_cb(sq->vq);
+				__netif_tx_unlock(txq);
+				__napi_schedule(napi);
+			}
+		}
+	}
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
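
The new tail instead keeps every sq->vq operation under the tx queue lock and open-codes the virtqueue_enable_cb_prepare()/virtqueue_poll() idiom: arm callbacks and snapshot the ring state under the lock, complete napi (if napi stays scheduled, callbacks are simply disabled again), then re-check the snapshot afterwards, rescheduling napi and re-taking the lock before touching the vq if work slipped in meanwhile. Below is a minimal, hypothetical userspace model of that idiom, assuming a plain mutex in place of the netdev tx lock; the toy_* names are stand-ins invented for illustration, not kernel APIs:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_vq {
		pthread_mutex_t lock;   /* stands in for the netdev tx queue lock */
		atomic_int pending;     /* completed buffers waiting to be reclaimed */
		atomic_bool cb_enabled; /* stands in for virtqueue callback state */
	};

	/* ~ virtqueue_enable_cb_prepare(): re-arm callbacks, snapshot state */
	static int toy_enable_cb_prepare(struct toy_vq *vq)
	{
		atomic_store(&vq->cb_enabled, true);
		return atomic_load(&vq->pending);
	}

	/* ~ virtqueue_poll(): did more work arrive since the snapshot? */
	static bool toy_poll(struct toy_vq *vq, int opaque)
	{
		return atomic_load(&vq->pending) != opaque;
	}

	static void toy_poll_tx(struct toy_vq *vq)
	{
		pthread_mutex_lock(&vq->lock);
		atomic_store(&vq->cb_enabled, false);   /* ~ virtqueue_disable_cb() */
		atomic_store(&vq->pending, 0);          /* ~ free_old_xmit_skbs() */
		int opaque = toy_enable_cb_prepare(vq); /* snapshot taken under lock */
		pthread_mutex_unlock(&vq->lock);

		/* A completion can slip in right here, after callbacks were armed
		 * but before the re-check; simulate one so the re-check fires. */
		atomic_fetch_add(&vq->pending, 1);

		if (toy_poll(vq, opaque))
			puts("missed work: reschedule, re-taking the lock before touching the vq");
		else
			puts("idle: armed callbacks will signal any new work");
	}

	int main(void)
	{
		struct toy_vq vq = { .lock = PTHREAD_MUTEX_INITIALIZER };
		atomic_init(&vq.pending, 3); /* pretend three buffers completed */
		atomic_init(&vq.cb_enabled, false);
		toy_poll_tx(&vq);
		return 0;
	}

The point the model mirrors from the patch: the snapshot is taken while the lock is held, napi completion itself never touches the vq, and the only post-unlock step is the read-only re-check; any further vq manipulation goes back under the lock.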