Commit 718628ad authored by Christoph Hellwig, committed by Jens Axboe

virtio_blk: split virtblk_probe

Split out a virtblk_read_limits helper that just reads the various
queue limits to separate it from the higher level probing logic.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240213073425.1621680-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 27e32cd2
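In plainer terms, the commit turns the limit-reading half of virtblk_probe() into a helper that probe calls once and whose error code it propagates. A minimal standalone sketch of that shape is below; the stand-in structs and the _sketch suffixes are illustrative simplifications, not the real kernel types or driver API.

    #include <stdio.h>

    /* Simplified stand-ins for the driver structures (illustration only). */
    struct fake_queue { unsigned int max_segments; };
    struct fake_vblk  { struct fake_queue q; };

    /*
     * After the split, all queue-limit probing lives in one helper that only
     * needs the already-allocated device/disk and returns 0 or a negative
     * error code.
     */
    static int virtblk_read_limits_sketch(struct fake_vblk *vblk)
    {
            /* ...read the virtio config fields and apply them to the queue... */
            vblk->q.max_segments = 128;     /* placeholder value */
            return 0;
    }

    /* probe keeps allocation/registration and just calls the helper. */
    static int virtblk_probe_sketch(struct fake_vblk *vblk)
    {
            int err = virtblk_read_limits_sketch(vblk);

            if (err)
                    return err;     /* upstream: goto out_cleanup_disk */
            return 0;
    }

    int main(void)
    {
            struct fake_vblk vblk = { { 0 } };

            printf("probe: %d, max_segments: %u\n",
                   virtblk_probe_sketch(&vblk), vblk.q.max_segments);
            return 0;
    }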
@@ -1248,31 +1248,17 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
-static int virtblk_probe(struct virtio_device *vdev)
+static int virtblk_read_limits(struct virtio_blk *vblk)
 {
-	struct virtio_blk *vblk;
-	struct request_queue *q;
-	int err, index;
-
+	struct request_queue *q = vblk->disk->queue;
+	struct virtio_device *vdev = vblk->vdev;
 	u32 v, blk_size, max_size, sg_elems, opt_io_size;
 	u32 max_discard_segs = 0;
 	u32 discard_granularity = 0;
 	u16 min_io_size;
 	u8 physical_block_exp, alignment_offset;
-	unsigned int queue_depth;
 	size_t max_dma_size;
+	int err;
 
-	if (!vdev->config->get) {
-		dev_err(&vdev->dev, "%s failure: config access disabled\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	err = ida_alloc_range(&vd_index_ida, 0,
-			      minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
-	if (err < 0)
-		goto out;
-	index = err;
-
 	/* We need to know how many segments before we allocate. */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -1286,73 +1272,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* Prevent integer overflows and honor max vq size */
 	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
 
-	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
-	if (!vblk) {
-		err = -ENOMEM;
-		goto out_free_index;
-	}
-
-	mutex_init(&vblk->vdev_mutex);
-
-	vblk->vdev = vdev;
-
-	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-
-	err = init_vq(vblk);
-	if (err)
-		goto out_free_vblk;
-
-	/* Default queue sizing is to fill the ring. */
-	if (!virtblk_queue_depth) {
-		queue_depth = vblk->vqs[0].vq->num_free;
-		/* ... but without indirect descs, we use 2 descs per req */
-		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-			queue_depth /= 2;
-	} else {
-		queue_depth = virtblk_queue_depth;
-	}
-
-	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
-	vblk->tag_set.ops = &virtio_mq_ops;
-	vblk->tag_set.queue_depth = queue_depth;
-	vblk->tag_set.numa_node = NUMA_NO_NODE;
-	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	vblk->tag_set.cmd_size =
-		sizeof(struct virtblk_req) +
-		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
-	vblk->tag_set.driver_data = vblk;
-	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
-	vblk->tag_set.nr_maps = 1;
-	if (vblk->io_queues[HCTX_TYPE_POLL])
-		vblk->tag_set.nr_maps = 3;
-
-	err = blk_mq_alloc_tag_set(&vblk->tag_set);
-	if (err)
-		goto out_free_vq;
-
-	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, NULL, vblk);
-	if (IS_ERR(vblk->disk)) {
-		err = PTR_ERR(vblk->disk);
-		goto out_free_tags;
-	}
-	q = vblk->disk->queue;
-
-	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
-	vblk->disk->major = major;
-	vblk->disk->first_minor = index_to_minor(index);
-	vblk->disk->minors = 1 << PART_BITS;
-	vblk->disk->private_data = vblk;
-	vblk->disk->fops = &virtblk_fops;
-	vblk->index = index;
-
-	/* configure queue flush support */
-	virtblk_update_cache_mode(vdev);
-
-	/* If disk is read-only in the host, the guest should obey */
-	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
-		set_disk_ro(vblk->disk, 1);
-
 	/* We can handle whatever the host told us to handle. */
 	blk_queue_max_segments(q, sg_elems);
@@ -1381,7 +1300,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		dev_err(&vdev->dev,
 			"virtio_blk: invalid block size: 0x%x\n",
 			blk_size);
-		goto out_cleanup_disk;
+		return err;
 	}
 
 	blk_queue_logical_block_size(q, blk_size);
...@@ -1455,8 +1374,7 @@ static int virtblk_probe(struct virtio_device *vdev) ...@@ -1455,8 +1374,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) { if (!v) {
dev_err(&vdev->dev, dev_err(&vdev->dev,
"virtio_blk: secure_erase_sector_alignment can't be 0\n"); "virtio_blk: secure_erase_sector_alignment can't be 0\n");
err = -EINVAL; return -EINVAL;
goto out_cleanup_disk;
} }
discard_granularity = min_not_zero(discard_granularity, v); discard_granularity = min_not_zero(discard_granularity, v);
@@ -1470,8 +1388,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		if (!v) {
 			dev_err(&vdev->dev,
 				"virtio_blk: max_secure_erase_sectors can't be 0\n");
-			err = -EINVAL;
-			goto out_cleanup_disk;
+			return -EINVAL;
 		}
 
 		blk_queue_max_secure_erase_sectors(q, v);
@@ -1485,8 +1402,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		if (!v) {
 			dev_err(&vdev->dev,
 				"virtio_blk: max_secure_erase_seg can't be 0\n");
-			err = -EINVAL;
-			goto out_cleanup_disk;
+			return -EINVAL;
 		}
 
 		max_discard_segs = min_not_zero(max_discard_segs, v);
@@ -1511,6 +1427,99 @@ static int virtblk_probe(struct virtio_device *vdev)
 		q->limits.discard_granularity = blk_size;
 	}
 
+	return 0;
+}
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+	struct virtio_blk *vblk;
+	struct request_queue *q;
+	int err, index;
+	unsigned int queue_depth;
+
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	err = ida_alloc_range(&vd_index_ida, 0,
+			      minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
+	if (err < 0)
+		goto out;
+	index = err;
+
+	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	if (!vblk) {
+		err = -ENOMEM;
+		goto out_free_index;
+	}
+
+	mutex_init(&vblk->vdev_mutex);
+
+	vblk->vdev = vdev;
+
+	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+
+	err = init_vq(vblk);
+	if (err)
+		goto out_free_vblk;
+
+	/* Default queue sizing is to fill the ring. */
+	if (!virtblk_queue_depth) {
+		queue_depth = vblk->vqs[0].vq->num_free;
+		/* ... but without indirect descs, we use 2 descs per req */
+		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+			queue_depth /= 2;
+	} else {
+		queue_depth = virtblk_queue_depth;
+	}
+
+	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+	vblk->tag_set.ops = &virtio_mq_ops;
+	vblk->tag_set.queue_depth = queue_depth;
+	vblk->tag_set.numa_node = NUMA_NO_NODE;
+	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	vblk->tag_set.cmd_size =
+		sizeof(struct virtblk_req) +
+		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
+	vblk->tag_set.driver_data = vblk;
+	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+	vblk->tag_set.nr_maps = 1;
+	if (vblk->io_queues[HCTX_TYPE_POLL])
+		vblk->tag_set.nr_maps = 3;
+
+	err = blk_mq_alloc_tag_set(&vblk->tag_set);
+	if (err)
+		goto out_free_vq;
+
+	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, NULL, vblk);
+	if (IS_ERR(vblk->disk)) {
+		err = PTR_ERR(vblk->disk);
+		goto out_free_tags;
+	}
+	q = vblk->disk->queue;
+
+	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+
+	vblk->disk->major = major;
+	vblk->disk->first_minor = index_to_minor(index);
+	vblk->disk->minors = 1 << PART_BITS;
+	vblk->disk->private_data = vblk;
+	vblk->disk->fops = &virtblk_fops;
+	vblk->index = index;
+
+	/* configure queue flush support */
+	virtblk_update_cache_mode(vdev);
+
+	/* If disk is read-only in the host, the guest should obey */
+	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
+		set_disk_ro(vblk->disk, 1);
+
+	err = virtblk_read_limits(vblk);
+	if (err)
+		goto out_cleanup_disk;
+
 	virtblk_update_capacity(vblk, false);
 	virtio_device_ready(vdev);