Commit 21bc54fc authored by Gerard Garcia, committed by Michael S. Tsirkin

vhost/vsock: drop space available check for TX vq

Remove the unnecessary use of enable/disable callback notifications
and the incorrect "more space available" check.

virtio_transport_tx_work already handles the case where the TX
virtqueue has more buffers available.
Signed-off-by: Gerard Garcia <ggarcia@deic.uab.cat>
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 52012619
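For context, the removed -ENOSPC retry is redundant because the TX used-buffer interrupt already schedules virtio_transport_tx_work, which reclaims completed buffers and re-queues the send worker once ring space is free. Below is a minimal sketch of that path; it follows the upstream virtio vsock transport driver, and the field and symbol names (tx_work, tx_lock, send_pkt_work, virtio_vsock_workqueue) are assumed from that driver rather than taken from this diff.

/*
 * Sketch only: how freed TX buffers restart the send worker.  Names such
 * as tx_work, tx_lock, send_pkt_work and virtio_vsock_workqueue are
 * assumed from the upstream virtio vsock transport driver.
 */
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	bool added = false;

	mutex_lock(&vsock->tx_lock);
	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		/* Reclaim every buffer the device has marked as used. */
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));
	mutex_unlock(&vsock->tx_lock);

	/* Ring space was freed, so let the send worker try again. */
	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

With that in place, the send worker can simply break out of its loop when virtqueue_add_sgs() fails and wait to be re-queued, which is what the diff below implements.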
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 	vq = vsock->vqs[VSOCK_VQ_TX];
 
-	/* Avoid unnecessary interrupts while we're processing the ring */
-	virtqueue_disable_cb(vq);
-
 	for (;;) {
 		struct virtio_vsock_pkt *pkt;
 		struct scatterlist hdr, buf, *sgs[2];
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
 			spin_unlock_bh(&vsock->send_pkt_list_lock);
-			virtqueue_enable_cb(vq);
 			break;
 		}
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 		}
 
 		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+		/* Usually this means that there is no more space available in
+		 * the vq
+		 */
 		if (ret < 0) {
 			spin_lock_bh(&vsock->send_pkt_list_lock);
 			list_add(&pkt->list, &vsock->send_pkt_list);
 			spin_unlock_bh(&vsock->send_pkt_list_lock);
-			if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
-				continue; /* retry now that we have more space */
 			break;
 		}