Commit 34ad6151 authored by Alan Adamson, committed by Christoph Hellwig

nvmet: add a clear_ids attribute for passthru targets

If the clear_ids attribute is set to true, the EUI/GUID/UUID is cleared
for the passthru target.  By default, loop targets set clear_ids to true.

This resolves an issue where connecting to a passthru target fails when
using a trtype of 'loop' because the EUI/GUID/UUID is not unique.
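
The new attribute lives under the subsystem's passthru configfs group.  As a
rough, hypothetical illustration (the configfs mount point and the subsystem
NQN "testnqn" below are assumptions, not part of this patch), it can be
toggled from user space like any other configfs attribute:

/*
 * Hypothetical user-space sketch, not part of this patch: enable
 * clear_ids for a passthru subsystem.  The configfs path and the
 * subsystem NQN ("testnqn") are assumptions; adjust for your setup.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *attr =
                "/sys/kernel/config/nvmet/subsystems/testnqn/passthru/clear_ids";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
                perror("open clear_ids");
                return 1;
        }
        /* The store handler parses the value with kstrtouint();
         * any non-zero value enables clearing of EUI/GUID/UUID. */
        if (write(fd, "1", 1) != 1)
                perror("write clear_ids");
        close(fd);
        return 0;
}

Reading the same file back returns the current value, as implemented by
nvmet_passthru_clear_ids_show() below.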

Fixes: 2079f41e ("nvme: check that EUI/GUID/UUID are globally unique")
Signed-off-by: Alan Adamson <alan.adamson@oracle.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent f7f70f4a
drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);

static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
                char *page)
{
        return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
        unsigned int clear_ids;

        if (kstrtouint(page, 0, &clear_ids))
                return -EINVAL;
        subsys->clear_ids = clear_ids;
        return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);

static struct configfs_attribute *nvmet_passthru_attrs[] = {
        &nvmet_passthru_attr_device_path,
        &nvmet_passthru_attr_enable,
        &nvmet_passthru_attr_admin_timeout,
        &nvmet_passthru_attr_io_timeout,
        &nvmet_passthru_attr_clear_ids,
        NULL,
};
...
drivers/nvme/target/core.c
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        ctrl->port = req->port;
        ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
        /* By default, loop targets clear the IDs (EUI/GUID/UUID) */
        if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
                subsys->clear_ids = 1;
#endif

        INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
        INIT_LIST_HEAD(&ctrl->async_events);
        INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
...
drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
        struct config_group     passthru_group;
        unsigned int            admin_timeout;
        unsigned int            io_timeout;
        unsigned int            clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
...
drivers/nvme/target/passthru.c
@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
                ctrl->cap &= ~(1ULL << 43);
}

static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        int pos, len;
        bool csi_seen = false;
        void *data;
        u8 csi;

        if (!ctrl->subsys->clear_ids)
                return status;

        data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
        if (!data)
                return NVME_SC_INTERNAL;

        status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
        if (status)
                goto out_free;

        for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
                struct nvme_ns_id_desc *cur = data + pos;

                if (cur->nidl == 0)
                        break;
                if (cur->nidt == NVME_NIDT_CSI) {
                        memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
                        csi_seen = true;
                        break;
                }
                len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
        }

        memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
        if (csi_seen) {
                struct nvme_ns_id_desc *cur = data;

                cur->nidt = NVME_NIDT_CSI;
                cur->nidl = NVME_NIDT_CSI_LEN;
                memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
        }
        status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
        kfree(data);
        return status;
}

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
         */
        id->mc = 0;

        if (req->sq->ctrl->subsys->clear_ids) {
                memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
                memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
        }

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                case NVME_ID_CNS_NS:
                        nvmet_passthru_override_id_ns(req);
                        break;
                case NVME_ID_CNS_NS_DESC_LIST:
                        nvmet_passthru_override_id_descs(req);
                        break;
                }
        } else if (status < 0)
                status = NVME_SC_INTERNAL;
...
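
For readers unfamiliar with the Namespace Identification Descriptor list
format that nvmet_passthru_override_id_descs() parses, the standalone sketch
below (not part of the patch; the struct and constants are redeclared locally
for illustration and only mirror the kernel definitions) shows how descriptors
are packed back to back and how the walk terminates at nidl == 0:

/*
 * Standalone illustration, not part of this patch: build a descriptor
 * list containing only a CSI entry and walk it the way the kernel does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IDENTIFY_DATA_SIZE      4096
#define NIDT_CSI                0x04
#define NIDT_CSI_LEN            1

struct ns_id_desc {
        uint8_t nidt;           /* descriptor type (EUI64, NGUID, UUID, CSI) */
        uint8_t nidl;           /* descriptor length; 0 terminates the list */
        uint8_t rsvd[2];
        /* nidl bytes of identifier follow the 4-byte header */
};

int main(void)
{
        uint8_t data[IDENTIFY_DATA_SIZE] = {0};
        struct ns_id_desc *desc = (struct ns_id_desc *)data;
        uint8_t csi = 0;        /* NVM command set */
        int pos, len;

        /* One-entry list: only a CSI descriptor, which is all the target
         * reports for a namespace once clear_ids is set. */
        desc->nidt = NIDT_CSI;
        desc->nidl = NIDT_CSI_LEN;
        memcpy(desc + 1, &csi, NIDT_CSI_LEN);

        /* Walk the list until a zero-length descriptor is found. */
        for (pos = 0; pos < IDENTIFY_DATA_SIZE; pos += len) {
                struct ns_id_desc *cur = (struct ns_id_desc *)(data + pos);

                if (cur->nidl == 0)
                        break;
                printf("descriptor type 0x%02x, length %u\n",
                       cur->nidt, cur->nidl);
                len = sizeof(*cur) + cur->nidl;
        }
        return 0;
}

With clear_ids enabled, the list returned to the host keeps at most that
single CSI descriptor; the EUI64, NGUID, and UUID entries are dropped, which
is what allows the loop-back connect to pass the host's uniqueness check.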