Commit c1ea02f1 authored by Jason Wang, committed by Michael S. Tsirkin

vhost: scsi: add weight support

This patch checks the weight and exits the request-handling loop once the
weight is exceeded. This is useful for preventing the vhost-scsi kthread
from hogging the CPU, which is guest triggerable. (A sketch of the weight
check is included after the commit metadata below.)

This addresses CVE-2019-3900.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Fixes: 057cbf49 ("tcm_vhost: Initial merge for vhost level target fabric driver")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
parent e79b431f
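
For reference, the loop exit added in the diff below relies on the vhost_exceeds_weight() helper that the preparatory patch "vhost: introduce vhost_exceeds_weight()" adds to drivers/vhost/vhost.c. The following is only an approximate sketch of that helper, reconstructed from the companion series rather than taken from this diff, so the exact comparisons may differ:

/* Approximate sketch (from the companion vhost patch, not this diff):
 * requeue the poll work and return true once the handler has processed
 * dev->weight requests or more than dev->byte_weight bytes, so the
 * worker yields instead of servicing a guest-controlled queue forever.
 */
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if (unlikely(total_len > dev->byte_weight) ||
	    unlikely(pkts >= dev->weight)) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}

The scsi handlers below pass 0 for total_len, so only the per-iteration request counter c (incremented as ++c in the new loop condition) can trigger the early exit.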
@@ -918,7 +918,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct iov_iter in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	int ret, prot_bytes;
+	int ret, prot_bytes, c = 0;
 	u16 lun;
 	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -938,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	vhost_disable_notify(&vs->dev, vq);
 
-	for (;;) {
+	do {
 		ret = vhost_scsi_get_desc(vs, vq, &vc);
 		if (ret)
 			goto err;
@@ -1118,7 +1118,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			break;
 		else if (ret == -EIO)
 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -1177,7 +1177,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	} v_req;
 	struct vhost_scsi_ctx vc;
 	size_t typ_size;
-	int ret;
+	int ret, c = 0;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -1191,7 +1191,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	vhost_disable_notify(&vs->dev, vq);
 
-	for (;;) {
+	do {
 		ret = vhost_scsi_get_desc(vs, vq, &vc);
 		if (ret)
 			goto err;
@@ -1270,7 +1270,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			break;
 		else if (ret == -EIO)
 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
 }
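
The hunks above only add the counters and the new loop condition; the limits that vhost_exceeds_weight() compares against are configured when the device is opened. The lines below are a hedged sketch of that wiring based on the related weight patches, not on the hunks shown here, so the constant name, its value, and the vhost_dev_init() argument list should be treated as assumptions:

/* Sketch (assumed from the companion patches, not shown in this excerpt):
 * the request weight caps how many requests one handler invocation may
 * process before requeueing itself; the byte weight is left at 0 because
 * the scsi handlers do not track a byte count.
 */
#define VHOST_SCSI_WEIGHT 256

	/* in vhost_scsi_open(), assumed call shape */
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0);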