Commit dfc19154 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "Some last minute fixes - most of them for regressions"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa_sim_net: complete the initialization before register the device
  vdpa/mlx5: Add and remove debugfs in setup/teardown driver
  tools/virtio: fix typo in README instructions
  vhost-scsi: Fix crash during LUN unmapping
  vhost-scsi: Fix vhost_scsi struct use after free
  virtio-blk: fix ZBD probe in kernels without ZBD support
  virtio-blk: fix to match virtio spec
parents c118b59e 9da667e5
drivers/block/virtio_blk.c

@@ -96,16 +96,14 @@ struct virtblk_req {
                 /*
                  * The zone append command has an extended in header.
-                 * The status field in zone_append_in_hdr must have
-                 * the same offset in virtblk_req as the non-zoned
-                 * status field above.
+                 * The status field in zone_append_in_hdr must always
+                 * be the last byte.
                  */
                 struct {
+                        __virtio64 sector;
                         u8 status;
-                        u8 reserved[7];
-                        __le64 append_sector;
-                } zone_append_in_hdr;
-        };
+                } zone_append;
+        } in_hdr;
 
         size_t in_hdr_len;
@@ -154,7 +152,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
                 sgs[num_out + num_in++] = vbr->sg_table.sgl;
         }
 
-        sg_init_one(&in_hdr, &vbr->status, vbr->in_hdr_len);
+        sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
         sgs[num_out + num_in++] = &in_hdr;
 
         return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
@@ -242,11 +240,14 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                                       struct request *req,
                                       struct virtblk_req *vbr)
 {
-        size_t in_hdr_len = sizeof(vbr->status);
+        size_t in_hdr_len = sizeof(vbr->in_hdr.status);
         bool unmap = false;
         u32 type;
         u64 sector = 0;
 
+        if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
+                return BLK_STS_NOTSUPP;
+
         /* Set fields for all request types */
         vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
@@ -287,7 +288,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
         case REQ_OP_ZONE_APPEND:
                 type = VIRTIO_BLK_T_ZONE_APPEND;
                 sector = blk_rq_pos(req);
-                in_hdr_len = sizeof(vbr->zone_append_in_hdr);
+                in_hdr_len = sizeof(vbr->in_hdr.zone_append);
                 break;
         case REQ_OP_ZONE_RESET:
                 type = VIRTIO_BLK_T_ZONE_RESET;
@@ -297,7 +298,10 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                 type = VIRTIO_BLK_T_ZONE_RESET_ALL;
                 break;
         case REQ_OP_DRV_IN:
-                /* Out header already filled in, nothing to do */
+                /*
+                 * Out header has already been prepared by the caller (virtblk_get_id()
+                 * or virtblk_submit_zone_report()), nothing to do here.
+                 */
                 return 0;
         default:
                 WARN_ON_ONCE(1);
@@ -318,16 +322,28 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
         return 0;
 }
 
+/*
+ * The status byte is always the last byte of the virtblk request
+ * in-header. This helper fetches its value for all in-header formats
+ * that are currently defined.
+ */
+static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
+{
+        return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
+}
+
 static inline void virtblk_request_done(struct request *req)
 {
         struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-        blk_status_t status = virtblk_result(vbr->status);
+        blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
+        struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
 
         virtblk_unmap_data(req, vbr);
         virtblk_cleanup_cmd(req);
 
         if (req_op(req) == REQ_OP_ZONE_APPEND)
-                req->__sector = le64_to_cpu(vbr->zone_append_in_hdr.append_sector);
+                req->__sector = virtio64_to_cpu(vblk->vdev,
+                                                vbr->in_hdr.zone_append.sector);
 
         blk_mq_end_request(req, status);
 }
@@ -355,7 +371,7 @@ static int virtblk_handle_req(struct virtio_blk_vq *vq,
                 if (likely(!blk_should_fake_timeout(req->q)) &&
                     !blk_mq_complete_request_remote(req) &&
-                    !blk_mq_add_to_batch(req, iob, vbr->status,
+                    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
                                          virtblk_complete_batch))
                         virtblk_request_done(req);
                 req_done++;
@@ -550,7 +566,6 @@ static void virtio_queue_rqs(struct request **rqlist)
 #ifdef CONFIG_BLK_DEV_ZONED
 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
                                          unsigned int nr_zones,
-                                         unsigned int zone_sectors,
                                          size_t *buflen)
 {
         struct request_queue *q = vblk->disk->queue;
@@ -558,7 +573,7 @@ static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
         void *buf;
 
         nr_zones = min_t(unsigned int, nr_zones,
-                         get_capacity(vblk->disk) >> ilog2(zone_sectors));
+                         get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
 
         bufsize = sizeof(struct virtio_blk_zone_report) +
                 nr_zones * sizeof(struct virtio_blk_zone_descriptor);
@@ -592,7 +607,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
                 return PTR_ERR(req);
 
         vbr = blk_mq_rq_to_pdu(req);
-        vbr->in_hdr_len = sizeof(vbr->status);
+        vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
         vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
         vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
@@ -601,7 +616,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
                 goto out;
 
         blk_execute_rq(req, false);
-        err = blk_status_to_errno(virtblk_result(vbr->status));
+        err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
 out:
         blk_mq_free_request(req);
         return err;
@@ -609,29 +624,72 @@ static int virtblk_submit_zone_report(
 static int virtblk_parse_zone(struct virtio_blk *vblk,
                               struct virtio_blk_zone_descriptor *entry,
-                              unsigned int idx, unsigned int zone_sectors,
-                              report_zones_cb cb, void *data)
+                              unsigned int idx, report_zones_cb cb, void *data)
 {
         struct blk_zone zone = { };
 
-        if (entry->z_type != VIRTIO_BLK_ZT_SWR &&
-            entry->z_type != VIRTIO_BLK_ZT_SWP &&
-            entry->z_type != VIRTIO_BLK_ZT_CONV) {
-                dev_err(&vblk->vdev->dev, "invalid zone type %#x\n",
-                        entry->z_type);
-                return -EINVAL;
+        zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
+        if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
+                zone.len = vblk->zone_sectors;
+        else
+                zone.len = get_capacity(vblk->disk) - zone.start;
+        zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
+        zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
+
+        switch (entry->z_type) {
+        case VIRTIO_BLK_ZT_SWR:
+                zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+                break;
+        case VIRTIO_BLK_ZT_SWP:
+                zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
+                break;
+        case VIRTIO_BLK_ZT_CONV:
+                zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+                break;
+        default:
+                dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
+                        zone.start, entry->z_type);
+                return -EIO;
         }
 
-        zone.type = entry->z_type;
-        zone.cond = entry->z_state;
-        zone.len = zone_sectors;
-        zone.capacity = le64_to_cpu(entry->z_cap);
-        zone.start = le64_to_cpu(entry->z_start);
-        if (zone.cond == BLK_ZONE_COND_FULL)
+        switch (entry->z_state) {
+        case VIRTIO_BLK_ZS_EMPTY:
+                zone.cond = BLK_ZONE_COND_EMPTY;
+                break;
+        case VIRTIO_BLK_ZS_CLOSED:
+                zone.cond = BLK_ZONE_COND_CLOSED;
+                break;
+        case VIRTIO_BLK_ZS_FULL:
+                zone.cond = BLK_ZONE_COND_FULL;
                 zone.wp = zone.start + zone.len;
-        else
-                zone.wp = le64_to_cpu(entry->z_wp);
+                break;
+        case VIRTIO_BLK_ZS_EOPEN:
+                zone.cond = BLK_ZONE_COND_EXP_OPEN;
+                break;
+        case VIRTIO_BLK_ZS_IOPEN:
+                zone.cond = BLK_ZONE_COND_IMP_OPEN;
+                break;
+        case VIRTIO_BLK_ZS_NOT_WP:
+                zone.cond = BLK_ZONE_COND_NOT_WP;
+                break;
+        case VIRTIO_BLK_ZS_RDONLY:
+                zone.cond = BLK_ZONE_COND_READONLY;
+                zone.wp = ULONG_MAX;
+                break;
+        case VIRTIO_BLK_ZS_OFFLINE:
+                zone.cond = BLK_ZONE_COND_OFFLINE;
+                zone.wp = ULONG_MAX;
+                break;
+        default:
+                dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
+                        zone.start, entry->z_state);
+                return -EIO;
+        }
 
+        /*
+         * The callback below checks the validity of the reported
+         * entry data, no need to further validate it here.
+         */
         return cb(&zone, idx, data);
 }
@@ -641,39 +699,47 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
 {
         struct virtio_blk *vblk = disk->private_data;
         struct virtio_blk_zone_report *report;
-        unsigned int zone_sectors = vblk->zone_sectors;
-        unsigned int nz, i;
-        int ret, zone_idx = 0;
+        unsigned long long nz, i;
         size_t buflen;
+        unsigned int zone_idx = 0;
+        int ret;
 
         if (WARN_ON_ONCE(!vblk->zone_sectors))
                 return -EOPNOTSUPP;
 
-        report = virtblk_alloc_report_buffer(vblk, nr_zones,
-                                             zone_sectors, &buflen);
+        report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
         if (!report)
                 return -ENOMEM;
 
+        mutex_lock(&vblk->vdev_mutex);
+
+        if (!vblk->vdev) {
+                ret = -ENXIO;
+                goto fail_report;
+        }
+
         while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
                 memset(report, 0, buflen);
 
                 ret = virtblk_submit_zone_report(vblk, (char *)report,
                                                  buflen, sector);
-                if (ret) {
-                        if (ret > 0)
-                                ret = -EIO;
-                        goto out_free;
-                }
-                nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+                if (ret)
+                        goto fail_report;
+
+                nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
+                           nr_zones);
                 if (!nz)
                         break;
 
                 for (i = 0; i < nz && zone_idx < nr_zones; i++) {
                         ret = virtblk_parse_zone(vblk, &report->zones[i],
-                                                 zone_idx, zone_sectors, cb, data);
+                                                 zone_idx, cb, data);
                         if (ret)
-                                goto out_free;
-                        sector = le64_to_cpu(report->zones[i].z_start) + zone_sectors;
+                                goto fail_report;
+
+                        sector = virtio64_to_cpu(vblk->vdev,
+                                                 report->zones[i].z_start) +
+                                 vblk->zone_sectors;
                         zone_idx++;
                 }
         }
@@ -682,7 +748,8 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
                 ret = zone_idx;
         else
                 ret = -EINVAL;
-out_free:
+fail_report:
+        mutex_unlock(&vblk->vdev_mutex);
         kvfree(report);
         return ret;
 }
@@ -691,20 +758,28 @@ static void virtblk_revalidate_zones(struct virtio_blk *vblk)
 {
         u8 model;
 
-        if (!vblk->zone_sectors)
-                return;
-
         virtio_cread(vblk->vdev, struct virtio_blk_config,
                      zoned.model, &model);
-        if (!blk_revalidate_disk_zones(vblk->disk, NULL))
-                set_capacity_and_notify(vblk->disk, 0);
+        switch (model) {
+        default:
+                dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
+                fallthrough;
+        case VIRTIO_BLK_Z_NONE:
+        case VIRTIO_BLK_Z_HA:
+                disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
+                return;
+        case VIRTIO_BLK_Z_HM:
+                WARN_ON_ONCE(!vblk->zone_sectors);
+                if (!blk_revalidate_disk_zones(vblk->disk, NULL))
+                        set_capacity_and_notify(vblk->disk, 0);
+        }
 }
 
 static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                                       struct virtio_blk *vblk,
                                       struct request_queue *q)
 {
-        u32 v;
+        u32 v, wg;
         u8 model;
         int ret;
@@ -713,16 +788,11 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
         switch (model) {
         case VIRTIO_BLK_Z_NONE:
+        case VIRTIO_BLK_Z_HA:
+                /* Present the host-aware device as non-zoned */
                 return 0;
         case VIRTIO_BLK_Z_HM:
                 break;
-        case VIRTIO_BLK_Z_HA:
-                /*
-                 * Present the host-aware device as a regular drive.
-                 * TODO It is possible to add an option to make it appear
-                 * in the system as a zoned drive.
-                 */
-                return 0;
         default:
                 dev_err(&vdev->dev, "unsupported zone model %d\n", model);
                 return -EINVAL;
@@ -735,32 +805,31 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
         virtio_cread(vdev, struct virtio_blk_config,
                      zoned.max_open_zones, &v);
-        disk_set_max_open_zones(vblk->disk, le32_to_cpu(v));
-
-        dev_dbg(&vdev->dev, "max open zones = %u\n", le32_to_cpu(v));
+        disk_set_max_open_zones(vblk->disk, v);
+        dev_dbg(&vdev->dev, "max open zones = %u\n", v);
 
         virtio_cread(vdev, struct virtio_blk_config,
                      zoned.max_active_zones, &v);
-        disk_set_max_active_zones(vblk->disk, le32_to_cpu(v));
-        dev_dbg(&vdev->dev, "max active zones = %u\n", le32_to_cpu(v));
+        disk_set_max_active_zones(vblk->disk, v);
+        dev_dbg(&vdev->dev, "max active zones = %u\n", v);
 
         virtio_cread(vdev, struct virtio_blk_config,
-                     zoned.write_granularity, &v);
-        if (!v) {
+                     zoned.write_granularity, &wg);
+        if (!wg) {
                 dev_warn(&vdev->dev, "zero write granularity reported\n");
                 return -ENODEV;
         }
-        blk_queue_physical_block_size(q, le32_to_cpu(v));
-        blk_queue_io_min(q, le32_to_cpu(v));
+        blk_queue_physical_block_size(q, wg);
+        blk_queue_io_min(q, wg);
 
-        dev_dbg(&vdev->dev, "write granularity = %u\n", le32_to_cpu(v));
+        dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
 
         /*
          * virtio ZBD specification doesn't require zones to be a power of
          * two sectors in size, but the code in this driver expects that.
          */
-        virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, &v);
-        vblk->zone_sectors = le32_to_cpu(v);
+        virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
+                     &vblk->zone_sectors);
         if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
                 dev_err(&vdev->dev,
                         "zoned device with non power of two zone size %u\n",
@@ -783,36 +852,46 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                         dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
                         return -ENODEV;
                 }
-                blk_queue_max_zone_append_sectors(q, le32_to_cpu(v));
-                dev_dbg(&vdev->dev, "max append sectors = %u\n", le32_to_cpu(v));
+
+                if ((v << SECTOR_SHIFT) < wg) {
+                        dev_err(&vdev->dev,
+                                "write granularity %u exceeds max_append_sectors %u limit\n",
+                                wg, v);
+                        return -ENODEV;
+                }
+
+                blk_queue_max_zone_append_sectors(q, v);
+                dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
         }
 
         return ret;
 }
 
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
-        return virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED);
-}
-
 #else
 
 /*
  * Zoned block device support is not configured in this kernel.
- * We only need to define a few symbols to avoid compilation errors.
+ * Host-managed zoned devices can't be supported, but others are
+ * good to go as regular block devices.
  */
 #define virtblk_report_zones NULL
+
 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
 {
 }
 
 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
                         struct virtio_blk *vblk, struct request_queue *q)
 {
-        return -EOPNOTSUPP;
-}
+        u8 model;
 
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
-        return false;
+        virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
+        if (model == VIRTIO_BLK_Z_HM) {
+                dev_err(&vdev->dev,
+                        "virtio_blk: zoned devices are not supported");
+                return -EOPNOTSUPP;
+        }
+
+        return 0;
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
@@ -831,7 +910,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                 return PTR_ERR(req);
 
         vbr = blk_mq_rq_to_pdu(req);
-        vbr->in_hdr_len = sizeof(vbr->status);
+        vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
         vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
         vbr->out_hdr.sector = 0;
@@ -840,7 +919,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                 goto out;
 
         blk_execute_rq(req, false);
-        err = blk_status_to_errno(virtblk_result(vbr->status));
+        err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
 out:
         blk_mq_free_request(req);
         return err;
@@ -1498,15 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
         virtblk_update_capacity(vblk, false);
         virtio_device_ready(vdev);
 
-        if (virtblk_has_zoned_feature(vdev)) {
+        /*
+         * All steps that follow use the VQs therefore they need to be
+         * placed after the virtio_device_ready() call above.
+         */
+        if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
                 err = virtblk_probe_zoned_device(vdev, vblk, q);
                 if (err)
                         goto out_cleanup_disk;
         }
 
-        dev_info(&vdev->dev, "blk config size: %zu\n",
-                 sizeof(struct virtio_blk_config));
-
         err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
         if (err)
                 goto out_cleanup_disk;
@@ -1607,10 +1687,7 @@ static unsigned int features[] = {
         VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
         VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
         VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
-        VIRTIO_BLK_F_SECURE_ERASE,
-#ifdef CONFIG_BLK_DEV_ZONED
-        VIRTIO_BLK_F_ZONED,
-#endif /* CONFIG_BLK_DEV_ZONED */
+        VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
 };
 
 static struct virtio_driver virtio_blk = {

drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2467,10 +2467,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                 err = 0;
                 goto out;
         }
+        mlx5_vdpa_add_debugfs(ndev);
         err = setup_virtqueues(mvdev);
         if (err) {
                 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
-                goto out;
+                goto err_setup;
         }
 
         err = create_rqt(ndev);
@@ -2500,6 +2501,8 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
         destroy_rqt(ndev);
 err_rqt:
         teardown_virtqueues(ndev);
+err_setup:
+        mlx5_vdpa_remove_debugfs(ndev->debugfs);
 out:
         return err;
 }
@@ -2513,6 +2516,8 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
         if (!ndev->setup)
                 return;
 
+        mlx5_vdpa_remove_debugfs(ndev->debugfs);
+        ndev->debugfs = NULL;
         teardown_steering(ndev);
         destroy_tir(ndev);
         destroy_rqt(ndev);
@@ -3261,7 +3266,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
         if (err)
                 goto err_reg;
 
-        mlx5_vdpa_add_debugfs(ndev);
         mgtdev->ndev = ndev;
         return 0;

drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -466,16 +466,21 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
         vdpasim_net_setup_config(simdev, config);
 
-        ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
-        if (ret)
-                goto reg_err;
-
         net = sim_to_net(simdev);
 
         u64_stats_init(&net->tx_stats.syncp);
         u64_stats_init(&net->rx_stats.syncp);
         u64_stats_init(&net->cq_stats.syncp);
 
+        /*
+         * Initialization must be completed before this call, since it can
+         * connect the device to the vDPA bus, so requests can arrive after
+         * this call.
+         */
+        ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
+        if (ret)
+                goto reg_err;
+
         return 0;
 
 reg_err:

drivers/vhost/scsi.c
@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
         struct se_portal_group se_tpg;
         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
         struct vhost_scsi *vhost_scsi;
-        struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
 
 struct vhost_scsi_tmf {
         struct vhost_work vwork;
-        struct vhost_scsi_tpg *tpg;
         struct vhost_scsi *vhost;
         struct vhost_scsi_virtqueue *svq;
-        struct list_head queue_entry;
 
         struct se_cmd se_cmd;
         u8 scsi_resp;
@@ -352,12 +349,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 
 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
 {
-        struct vhost_scsi_tpg *tpg = tmf->tpg;
         struct vhost_scsi_inflight *inflight = tmf->inflight;
 
-        mutex_lock(&tpg->tv_tpg_mutex);
-        list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
-        mutex_unlock(&tpg->tv_tpg_mutex);
-
+        kfree(tmf);
         vhost_scsi_put_inflight(inflight);
 }
@@ -1194,19 +1188,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
                 goto send_reject;
         }
 
-        mutex_lock(&tpg->tv_tpg_mutex);
-        if (list_empty(&tpg->tmf_queue)) {
-                pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
-                mutex_unlock(&tpg->tv_tpg_mutex);
+        tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+        if (!tmf)
                 goto send_reject;
-        }
 
-        tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-                               queue_entry);
-        list_del_init(&tmf->queue_entry);
-        mutex_unlock(&tpg->tv_tpg_mutex);
-
-        tmf->tpg = tpg;
+        vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
         tmf->vhost = vs;
         tmf->svq = svq;
         tmf->resp_iov = vq->iov[vc->out];
@@ -1658,7 +1644,10 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                 tpg = vs_tpg[i];
                 if (tpg) {
+                        mutex_lock(&tpg->tv_tpg_mutex);
+                        tpg->vhost_scsi = NULL;
                         tpg->tv_tpg_vhost_count--;
+                        mutex_unlock(&tpg->tv_tpg_mutex);
                         target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
                 }
         }
@@ -2032,19 +2021,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 {
         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                 struct vhost_scsi_tpg, se_tpg);
-        struct vhost_scsi_tmf *tmf;
-
-        tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
-        if (!tmf)
-                return -ENOMEM;
-        INIT_LIST_HEAD(&tmf->queue_entry);
-        vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 
         mutex_lock(&vhost_scsi_mutex);
 
         mutex_lock(&tpg->tv_tpg_mutex);
         tpg->tv_tpg_port_count++;
-        list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
         mutex_unlock(&tpg->tv_tpg_mutex);
 
         vhost_scsi_hotplug(tpg, lun);
@@ -2059,16 +2040,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 {
         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                 struct vhost_scsi_tpg, se_tpg);
-        struct vhost_scsi_tmf *tmf;
 
         mutex_lock(&vhost_scsi_mutex);
 
         mutex_lock(&tpg->tv_tpg_mutex);
         tpg->tv_tpg_port_count--;
-        tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-                               queue_entry);
-        list_del(&tmf->queue_entry);
-        kfree(tmf);
         mutex_unlock(&tpg->tv_tpg_mutex);
 
         vhost_scsi_hotunplug(tpg, lun);
@@ -2329,7 +2305,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
         }
         mutex_init(&tpg->tv_tpg_mutex);
         INIT_LIST_HEAD(&tpg->tv_tpg_list);
-        INIT_LIST_HEAD(&tpg->tmf_queue);
         tpg->tport = tport;
         tpg->tport_tpgt = tpgt;

include/uapi/linux/virtio_blk.h
@@ -140,11 +140,11 @@ struct virtio_blk_config {
         /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
         struct virtio_blk_zoned_characteristics {
-                __le32 zone_sectors;
-                __le32 max_open_zones;
-                __le32 max_active_zones;
-                __le32 max_append_sectors;
-                __le32 write_granularity;
+                __virtio32 zone_sectors;
+                __virtio32 max_open_zones;
+                __virtio32 max_active_zones;
+                __virtio32 max_append_sectors;
+                __virtio32 write_granularity;
                 __u8 model;
                 __u8 unused2[3];
         } zoned;
@@ -241,11 +241,11 @@ struct virtio_blk_outhdr {
  */
 struct virtio_blk_zone_descriptor {
         /* Zone capacity */
-        __le64 z_cap;
+        __virtio64 z_cap;
         /* The starting sector of the zone */
-        __le64 z_start;
+        __virtio64 z_start;
         /* Zone write pointer position in sectors */
-        __le64 z_wp;
+        __virtio64 z_wp;
         /* Zone type */
         __u8 z_type;
         /* Zone state */
@@ -254,7 +254,7 @@ struct virtio_blk_zone_descriptor {
 };
 
 struct virtio_blk_zone_report {
-        __le64 nr_zones;
+        __virtio64 nr_zones;
         __u8 reserved[56];
         struct virtio_blk_zone_descriptor zones[];
 };

tools/virtio/virtio-trace/README
@@ -61,7 +61,7 @@ and
          id=channel0,name=agent-ctl-path\
  ##data path##
         -chardev pipe,id=charchannel1,path=/tmp/virtio-trace/trace-path-cpu0\
-        -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel0,\
+        -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,\
          id=channel1,name=trace-path-cpu0\
 ...