Commit 455944e4 authored by Jens Axboe

Merge tag 'nvme-6.3-2023-02-07' of git://git.infradead.org/nvme into for-6.3/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.3

 - small improvements to the logging functionality (Amit Engel)
 - authentication cleanups (Hannes Reinecke)
 - cleanup and optimize the DMA mapping code in the PCIe driver
   (Keith Busch)
 - work around the command effects for Format NVM (Keith Busch)
 - misc cleanups (Keith Busch, Christoph Hellwig)"

* tag 'nvme-6.3-2023-02-07' of git://git.infradead.org/nvme:
  nvme: mask CSE effects for security receive
  nvme: always initialize known command effects
  nvmet: for nvme admin set_features cmd, call nvmet_check_data_len_lte()
  nvme-tcp: add additional info for nvme_tcp_timeout log
  nvme: add nvme_opcode_str function for all nvme cmd types
  nvme: remove nvme_execute_passthru_rq
  nvme-pci: place descriptor addresses in iod
  nvme-pci: use mapped entries for sgl decision
  nvme-pci: remove SGL segment descriptors
  nvme-auth: don't use NVMe status codes
  nvme-fabrics: clarify AUTHREQ result handling
parents 1972d038 baff6491
@@ -158,7 +158,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
     if (size > CHAP_BUF_SIZE) {
         chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
-        return NVME_SC_INVALID_FIELD;
+        return -EINVAL;
     }
     hmac_name = nvme_auth_hmac_name(data->hashid);
@@ -167,7 +167,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
             "qid %d: invalid HASH ID %d\n",
             chap->qid, data->hashid);
         chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
-        return NVME_SC_INVALID_FIELD;
+        return -EPROTO;
     }
     if (chap->hash_id == data->hashid && chap->shash_tfm &&
@@ -193,7 +193,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
             chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
         chap->shash_tfm = NULL;
         chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
-        return NVME_SC_AUTH_REQUIRED;
+        return -ENOMEM;
     }
     if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
@@ -203,7 +203,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
         crypto_free_shash(chap->shash_tfm);
         chap->shash_tfm = NULL;
         chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
-        return NVME_SC_AUTH_REQUIRED;
+        return -EPROTO;
     }
     chap->hash_id = data->hashid;
@@ -219,7 +219,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
             chap->qid, data->dhgid);
         chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
         /* Leave previous dh_tfm intact */
-        return NVME_SC_AUTH_REQUIRED;
+        return -EPROTO;
     }
     if (chap->dhgroup_id == data->dhgid &&
@@ -242,7 +242,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
                 "qid %d: empty DH value\n",
                 chap->qid);
             chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
-            return NVME_SC_INVALID_FIELD;
+            return -EPROTO;
         }
         chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
@@ -254,7 +254,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
                 chap->qid, ret, gid_name);
             chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
             chap->dh_tfm = NULL;
-            return NVME_SC_AUTH_REQUIRED;
+            return -ret;
         }
         dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
             chap->qid, gid_name);
@@ -263,7 +263,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
             "qid %d: invalid DH value for NULL DH\n",
             chap->qid);
         chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
-        return NVME_SC_INVALID_FIELD;
+        return -EPROTO;
     }
     chap->dhgroup_id = data->dhgid;
@@ -274,7 +274,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
         chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
         if (!chap->ctrl_key) {
             chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
-            return NVME_SC_AUTH_REQUIRED;
+            return -ENOMEM;
         }
         chap->ctrl_key_len = dhvlen;
         memcpy(chap->ctrl_key, data->cval + chap->hash_len,
@@ -344,7 +344,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
     if (size > CHAP_BUF_SIZE) {
         chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
-        return NVME_SC_INVALID_FIELD;
+        return -EINVAL;
     }
     if (data->hl != chap->hash_len) {
@@ -352,7 +352,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
             "qid %d: invalid hash length %u\n",
             chap->qid, data->hl);
         chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
-        return NVME_SC_INVALID_FIELD;
+        return -EPROTO;
     }
     /* Just print out information for the admin queue */
@@ -376,7 +376,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
             "qid %d: controller authentication failed\n",
             chap->qid);
         chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
-        return NVME_SC_AUTH_REQUIRED;
+        return -ECONNREFUSED;
     }
     /* Just print out information for the admin queue */
@@ -730,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
             NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
     if (ret) {
         chap->status = ret;
-        chap->error = NVME_SC_AUTH_REQUIRED;
+        chap->error = -ECONNREFUSED;
         return;
     }
@@ -798,7 +798,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
             NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
     if (ret) {
         chap->status = ret;
-        chap->error = NVME_SC_AUTH_REQUIRED;
+        chap->error = -ECONNREFUSED;
         return;
     }
@@ -819,7 +819,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
     ret = nvme_auth_process_dhchap_success1(ctrl, chap);
     if (ret) {
         /* Controller authentication failed */
-        chap->error = NVME_SC_AUTH_REQUIRED;
+        chap->error = -ECONNREFUSED;
         goto fail2;
     }
...
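
The auth changes above switch the DH-CHAP helpers from returning NVMe wire status codes (NVME_SC_*) to plain negative kernel errnos; the protocol-level failure reason still lives in chap->status, so callers can use the usual Linux error conventions. A minimal userspace sketch of that convention, with a made-up validate_buffer() helper that is not the driver's code:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for a parser that reports kernel-style errors:
 * 0 on success, a negative errno on failure. Protocol status would be
 * stored out-of-band, the way chap->status is in the driver. */
static int validate_buffer(size_t size, size_t limit)
{
    if (size > limit)
        return -EINVAL; /* malformed input */
    return 0;
}

int main(void)
{
    int ret = validate_buffer(8192, 4096);

    if (ret < 0)
        printf("kernel-style error: %d (errno %d)\n", ret, -ret);
    else
        printf("success\n");
    return 0;
}
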
@@ -54,6 +54,14 @@ static const char * const nvme_admin_ops[] = {
     [nvme_admin_get_lba_status] = "Get LBA Status",
 };
+static const char * const nvme_fabrics_ops[] = {
+    [nvme_fabrics_type_property_set] = "Property Set",
+    [nvme_fabrics_type_property_get] = "Property Get",
+    [nvme_fabrics_type_connect] = "Connect",
+    [nvme_fabrics_type_auth_send] = "Authentication Send",
+    [nvme_fabrics_type_auth_receive] = "Authentication Receive",
+};
 static const char * const nvme_statuses[] = {
     [NVME_SC_SUCCESS] = "Success",
     [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
@@ -185,3 +193,11 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
         return nvme_admin_ops[opcode];
     return "Unknown";
 }
+EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+    if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
+        return nvme_fabrics_ops[opcode];
+    return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str);
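
nvme_get_fabrics_opcode_str() follows the same pattern as the existing admin and I/O opcode tables: a sparse array of names indexed by opcode, with a bounds- and NULL-checked lookup that falls back to "Unknown". A standalone sketch of that pattern; the enum values below mirror the kernel's nvmf_capsule_command constants but should be treated as illustrative here:

#include <stdio.h>
#include <stddef.h>

enum {
    FABRICS_PROPERTY_SET  = 0x00,
    FABRICS_CONNECT       = 0x01,
    FABRICS_PROPERTY_GET  = 0x04,
    FABRICS_AUTH_SEND     = 0x05,
    FABRICS_AUTH_RECEIVE  = 0x06,
};

/* Sparse table: unnamed slots stay NULL, so the lookup must check for that. */
static const char * const fabrics_ops[] = {
    [FABRICS_PROPERTY_SET]  = "Property Set",
    [FABRICS_PROPERTY_GET]  = "Property Get",
    [FABRICS_CONNECT]       = "Connect",
    [FABRICS_AUTH_SEND]     = "Authentication Send",
    [FABRICS_AUTH_RECEIVE]  = "Authentication Receive",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *fabrics_opcode_str(unsigned char opcode)
{
    if (opcode < ARRAY_SIZE(fabrics_ops) && fabrics_ops[opcode])
        return fabrics_ops[opcode];
    return "Unknown";
}

int main(void)
{
    printf("0x01 -> %s\n", fabrics_opcode_str(0x01)); /* Connect */
    printf("0x02 -> %s\n", fabrics_opcode_str(0x02)); /* gap in the table */
    printf("0x7f -> %s\n", fabrics_opcode_str(0x7f)); /* out of range */
    return 0;
}
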
@@ -1002,7 +1002,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  * >0: nvme controller's cqe status response
  * <0: kernel error in lieu of controller response
  */
-static int nvme_execute_rq(struct request *rq, bool at_head)
+int nvme_execute_rq(struct request *rq, bool at_head)
 {
     blk_status_t status;
@@ -1013,6 +1013,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
         return nvme_req(rq)->status;
     return blk_status_to_errno(status);
 }
+EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
 /*
  * Returns 0 on success. If the result is negative, it's a Linux error code;
@@ -1058,41 +1059,12 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
-static u32 nvme_known_admin_effects(u8 opcode)
-{
-    switch (opcode) {
-    case nvme_admin_format_nvm:
-        return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
-            NVME_CMD_EFFECTS_CSE_MASK;
-    case nvme_admin_sanitize_nvm:
-        return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
-    default:
-        break;
-    }
-    return 0;
-}
-static u32 nvme_known_nvm_effects(u8 opcode)
-{
-    switch (opcode) {
-    case nvme_cmd_write:
-    case nvme_cmd_write_zeroes:
-    case nvme_cmd_write_uncor:
-        return NVME_CMD_EFFECTS_LBCC;
-    default:
-        return 0;
-    }
-}
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 {
     u32 effects = 0;
     if (ns) {
-        if (ns->head->effects)
-            effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
-        if (ns->head->ids.csi == NVME_CSI_NVM)
-            effects |= nvme_known_nvm_effects(opcode);
+        effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
         if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
             dev_warn_once(ctrl->device,
                 "IO command:%02x has unusual effects:%08x\n",
@@ -1105,17 +1077,14 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
          */
         effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
     } else {
-        if (ctrl->effects)
-            effects = le32_to_cpu(ctrl->effects->acs[opcode]);
-        effects |= nvme_known_admin_effects(opcode);
+        effects = le32_to_cpu(ctrl->effects->acs[opcode]);
     }
     return effects;
 }
 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
-static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-        u8 opcode)
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 {
     u32 effects = nvme_command_effects(ctrl, ns, opcode);
@@ -1133,6 +1102,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
     }
     return effects;
 }
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
 void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
         struct nvme_command *cmd, int status)
@@ -1174,17 +1144,6 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
-int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
-{
-    struct nvme_command *cmd = nvme_req(rq)->cmd;
-    struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
-    struct nvme_ns *ns = rq->q->queuedata;
-    *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-    return nvme_execute_rq(rq, false);
-}
-EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 /*
  * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
  *
@@ -3120,6 +3079,62 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
     return ret;
 }
+static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+{
+    struct nvme_effects_log *log = ctrl->effects;
+    log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+            NVME_CMD_EFFECTS_NCC |
+            NVME_CMD_EFFECTS_CSE_MASK);
+    log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+            NVME_CMD_EFFECTS_CSE_MASK);
+    /*
+     * The spec says the result of a security receive command depends on
+     * the previous security send command. As such, many vendors log this
+     * command as one to submitted only when no other commands to the same
+     * namespace are outstanding. The intention is to tell the host to
+     * prevent mixing security send and receive.
+     *
+     * This driver can only enforce such exclusive access against IO
+     * queues, though. We are not readily able to enforce such a rule for
+     * two commands to the admin queue, which is the only queue that
+     * matters for this command.
+     *
+     * Rather than blindly freezing the IO queues for this effect that
+     * doesn't even apply to IO, mask it off.
+     */
+    log->acs[nvme_admin_security_recv] &= ~NVME_CMD_EFFECTS_CSE_MASK;
+    log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+    log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+    log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+}
+static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+    int ret = 0;
+    if (ctrl->effects)
+        return 0;
+    if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
+        ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+        if (ret < 0)
+            return ret;
+    }
+    if (!ctrl->effects) {
+        ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+        if (!ctrl->effects)
+            return -ENOMEM;
+        xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+    }
+    nvme_init_known_nvm_effects(ctrl);
+    return 0;
+}
 static int nvme_init_identify(struct nvme_ctrl *ctrl)
 {
     struct nvme_id_ctrl *id;
@@ -3133,12 +3148,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
         return -EIO;
     }
-    if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
-        ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
-        if (ret < 0)
-            goto out_free;
-    }
     if (!(ctrl->ops->flags & NVME_F_FABRICS))
         ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -3161,6 +3170,10 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
         ret = nvme_init_subsystem(ctrl, id);
         if (ret)
             goto out_free;
+        ret = nvme_init_effects(ctrl, id);
+        if (ret)
+            goto out_free;
     }
     memcpy(ctrl->subsys->firmware_rev, id->fr,
         sizeof(ctrl->subsys->firmware_rev));
...
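
The core.c change guarantees that ctrl->effects always exists (allocating a zeroed log when the controller does not report one) and then ORs the effects the driver already knows about into that log, so nvme_command_effects() no longer needs per-opcode special cases. A compressed userspace sketch of the "merge known bits into a reported log" idea; the flag values and opcodes below are placeholders, not the NVMe-defined effect bits:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Placeholder effect flags -- stand-ins for NVME_CMD_EFFECTS_* bits. */
#define EFF_LBCC        (1u << 1)   /* logical block content change */
#define EFF_NCC         (1u << 2)   /* namespace capability change */
#define EFF_CSE_MASK    (3u << 16)  /* command submission/execution limits */

#define OP_FORMAT        0x80       /* placeholder opcode values */
#define OP_SECURITY_RECV 0x82

struct effects_log {
    uint32_t acs[256];  /* admin command effects, indexed by opcode */
};

static void init_known_effects(struct effects_log *log)
{
    /* Merge driver-known effects on top of whatever the device reported. */
    log->acs[OP_FORMAT] |= EFF_LBCC | EFF_NCC | EFF_CSE_MASK;
    /* Mask off an effect that only hurts (see the Security Receive comment). */
    log->acs[OP_SECURITY_RECV] &= ~EFF_CSE_MASK;
}

int main(void)
{
    struct effects_log log;

    memset(&log, 0, sizeof(log));               /* device reported nothing */
    log.acs[OP_SECURITY_RECV] = EFF_CSE_MASK;   /* pretend a vendor set it */
    init_known_effects(&log);
    printf("format effects:        0x%08x\n", (unsigned)log.acs[OP_FORMAT]);
    printf("security recv effects: 0x%08x\n", (unsigned)log.acs[OP_SECURITY_RECV]);
    return 0;
}
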
@@ -410,7 +410,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
     result = le32_to_cpu(res.u32);
     ctrl->cntlid = result & 0xFFFF;
-    if ((result >> 16) & 0x3) {
+    if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+        /* Secure concatenation is not implemented */
+        if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+            dev_warn(ctrl->device,
+                "qid 0: secure concatenation is not supported\n");
+            ret = NVME_SC_AUTH_REQUIRED;
+            goto out_free_data;
+        }
         /* Authentication required */
         ret = nvme_auth_negotiate(ctrl, 0);
         if (ret) {
@@ -486,7 +493,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
             &cmd, data);
     }
     result = le32_to_cpu(res.u32);
-    if ((result >> 16) & 2) {
+    if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+        /* Secure concatenation is not implemented */
+        if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+            dev_warn(ctrl->device,
+                "qid 0: secure concatenation is not supported\n");
+            ret = NVME_SC_AUTH_REQUIRED;
+            goto out_free_data;
+        }
         /* Authentication required */
         ret = nvme_auth_negotiate(ctrl, qid);
         if (ret) {
@@ -500,6 +514,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
                 "qid %u: authentication failed\n", qid);
         }
     }
+out_free_data:
     kfree(data);
     return ret;
 }
...
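
The connect-path change replaces magic shifts such as (result >> 16) & 2 with named AUTHREQ flags and bails out early when the controller asks for secure channel concatenation, which this driver does not implement. A tiny sketch of parsing that completion dword; the flag bit positions below are illustrative placeholders, not the values defined by the NVMe-oF specification:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit positions -- stand-ins for NVME_CONNECT_AUTHREQ_ATR/ASCR. */
#define AUTHREQ_ATR   (1u << 16)  /* authentication required */
#define AUTHREQ_ASCR  (1u << 17)  /* secure concatenation required */

static int handle_connect_result(uint32_t result)
{
    unsigned cntlid = result & 0xFFFF;  /* low 16 bits carry the controller ID */

    printf("cntlid %u\n", cntlid);
    if (result & (AUTHREQ_ATR | AUTHREQ_ASCR)) {
        if (result & AUTHREQ_ASCR) {
            printf("secure concatenation is not supported\n");
            return -1;  /* the driver maps this to NVME_SC_AUTH_REQUIRED */
        }
        printf("starting DH-HMAC-CHAP negotiation\n");
    }
    return 0;
}

int main(void)
{
    handle_connect_result(0x00010007);  /* ATR-style flag set: authenticate */
    handle_connect_result(0x0002000a);  /* ASCR-style flag set: refuse */
    return 0;
}
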
@@ -219,6 +219,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
         void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
         u64 *result, unsigned timeout, unsigned int flags)
 {
+    struct nvme_ns *ns = q->queuedata;
     struct nvme_ctrl *ctrl;
     struct request *req;
     void *meta = NULL;
@@ -241,8 +242,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
     bio = req->bio;
     ctrl = nvme_req(req)->ctrl;
-    ret = nvme_execute_passthru_rq(req, &effects);
+    effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+    ret = nvme_execute_rq(req, false);
     if (result)
         *result = le64_to_cpu(nvme_req(req)->result.u64);
     if (meta)
...
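
With nvme_execute_passthru_rq() gone, every passthrough caller now open-codes the same three-phase sequence: compute the command effects (possibly freezing queues), execute the request, and finish the effects handling once the result is known. The following is a stand-in harness that only illustrates that call order; none of these types or functions are the driver's, whose real signatures are the exported nvme_passthru_start(), nvme_execute_rq() and nvme_passthru_end() shown above:

#include <stdio.h>
#include <stdint.h>

struct fake_req { unsigned opcode; };

static uint32_t passthru_start(unsigned opcode)
{
    printf("start: freeze queues if opcode %#x has CSE effects\n", opcode);
    return 0;   /* effects bits for the later end phase */
}

static int execute_rq(struct fake_req *rq)
{
    printf("execute: issue opcode %#x and wait for completion\n", rq->opcode);
    return 0;   /* 0 on success, >0 NVMe status, <0 kernel errno */
}

static void passthru_end(uint32_t effects, int status)
{
    printf("end: rescan/unfreeze as needed (effects %#x, status %d)\n",
           (unsigned)effects, status);
}

int main(void)
{
    struct fake_req rq = { .opcode = 0x80 };
    uint32_t effects = passthru_start(rq.opcode);
    int status = execute_rq(&rq);

    passthru_end(effects, status);
    return 0;
}
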
@@ -1070,7 +1070,8 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
+int nvme_execute_rq(struct request *rq, bool at_head);
 void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
         struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
@@ -1086,6 +1087,7 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
 const unsigned char *nvme_get_error_status_str(u16 status);
 const unsigned char *nvme_get_opcode_str(u8 opcode);
 const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
 #else /* CONFIG_NVME_VERBOSE_ERRORS */
 static inline const unsigned char *nvme_get_error_status_str(u16 status)
 {
@@ -1099,6 +1101,18 @@ static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
 {
     return "Admin Cmd";
 }
+static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+    return "Fabrics Cmd";
+}
 #endif /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+{
+    if (opcode == nvme_fabrics_command)
+        return nvme_get_fabrics_opcode_str(fctype);
+    return qid ? nvme_get_opcode_str(opcode) :
+        nvme_get_admin_opcode_str(opcode);
+}
 #endif /* _NVME_H */
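
nvme_opcode_str() centralizes the naming rule that transports previously open-coded: a fabrics command is named by its fctype, queue 0 is the admin queue, and anything else is an I/O queue. A self-contained sketch of that dispatch with stub lookup functions that just return the same fallback labels the non-verbose kernel configuration uses; the 0x7f opcode mirrors nvme_fabrics_command but is illustrative here:

#include <stdio.h>

#define FABRICS_COMMAND 0x7f  /* illustrative; mirrors nvme_fabrics_command */

/* Stand-ins for the three lookup helpers declared above. */
static const char *io_opcode_str(unsigned char op)     { (void)op; return "I/O Cmd"; }
static const char *admin_opcode_str(unsigned char op)  { (void)op; return "Admin Cmd"; }
static const char *fabrics_opcode_str(unsigned char f) { (void)f;  return "Fabrics Cmd"; }

/* Same dispatch rule as nvme_opcode_str(). */
static const char *opcode_str(int qid, unsigned char opcode, unsigned char fctype)
{
    if (opcode == FABRICS_COMMAND)
        return fabrics_opcode_str(fctype);
    return qid ? io_opcode_str(opcode) : admin_opcode_str(opcode);
}

int main(void)
{
    printf("%s\n", opcode_str(0, 0x06, 0));                  /* admin queue */
    printf("%s\n", opcode_str(3, 0x01, 0));                  /* I/O queue */
    printf("%s\n", opcode_str(1, FABRICS_COMMAND, 0x01));    /* fabrics capsule */
    return 0;
}
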
@@ -42,8 +42,9 @@
  * These can be higher, but we need to ensure that any command doesn't
  * require an sg allocation that needs more than a page of data.
  */
-#define NVME_MAX_KB_SZ 4096
-#define NVME_MAX_SEGS 127
+#define NVME_MAX_KB_SZ 8192
+#define NVME_MAX_SEGS 128
+#define NVME_MAX_NR_ALLOCATIONS 5
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0444);
@@ -215,6 +216,11 @@ struct nvme_queue {
     struct completion delete_done;
 };
+union nvme_descriptor {
+    struct nvme_sgl_desc *sg_list;
+    __le64 *prp_list;
+};
 /*
  * The nvme_iod describes the data in an I/O.
  *
@@ -232,6 +238,7 @@ struct nvme_iod {
     dma_addr_t first_dma;
     dma_addr_t meta_dma;
     struct sg_table sgt;
+    union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
 };
 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -386,16 +393,6 @@ static int nvme_pci_npages_prp(void)
     return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 }
-/*
- * Calculates the number of pages needed for the SGL segments. For example a 4k
- * page can accommodate 256 SGL descriptors.
- */
-static int nvme_pci_npages_sgl(void)
-{
-    return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
-            NVME_CTRL_PAGE_SIZE);
-}
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
         unsigned int hctx_idx)
 {
@@ -509,16 +506,10 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
     spin_unlock(&nvmeq->sq_lock);
 }
-static void **nvme_pci_iod_list(struct request *req)
-{
-    struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-    return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
-}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+        int nseg)
 {
     struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-    int nseg = blk_rq_nr_phys_segments(req);
     unsigned int avg_seg_size;
     avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
@@ -540,7 +531,7 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
     int i;
     for (i = 0; i < iod->nr_allocations; i++) {
-        __le64 *prp_list = nvme_pci_iod_list(req)[i];
+        __le64 *prp_list = iod->list[i].prp_list;
         dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
         dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
@@ -548,22 +539,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
     }
 }
-static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
-{
-    const int last_sg = SGES_PER_PAGE - 1;
-    struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-    dma_addr_t dma_addr = iod->first_dma;
-    int i;
-    for (i = 0; i < iod->nr_allocations; i++) {
-        struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
-        dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
-        dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
-        dma_addr = next_dma_addr;
-    }
-}
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
     struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -579,10 +554,11 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
     dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
     if (iod->nr_allocations == 0)
-        dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+        dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
                 iod->first_dma);
     else if (iod->use_sgl)
-        nvme_free_sgls(dev, req);
+        dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
+                iod->first_dma);
     else
         nvme_free_prps(dev, req);
     mempool_free(iod->sgt.sgl, dev->iod_mempool);
@@ -613,7 +589,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
     u64 dma_addr = sg_dma_address(sg);
     int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
     __le64 *prp_list;
-    void **list = nvme_pci_iod_list(req);
     dma_addr_t prp_dma;
     int nprps, i;
@@ -651,7 +626,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
         iod->nr_allocations = -1;
         return BLK_STS_RESOURCE;
     }
-    list[0] = prp_list;
+    iod->list[0].prp_list = prp_list;
     iod->first_dma = prp_dma;
     i = 0;
     for (;;) {
@@ -660,7 +635,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
             prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
             if (!prp_list)
                 goto free_prps;
-            list[iod->nr_allocations++] = prp_list;
+            iod->list[iod->nr_allocations++].prp_list = prp_list;
             prp_list[0] = old_prp_list[i - 1];
             old_prp_list[i - 1] = cpu_to_le64(prp_dma);
             i = 1;
@@ -705,13 +680,8 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
         dma_addr_t dma_addr, int entries)
 {
     sge->addr = cpu_to_le64(dma_addr);
-    if (entries < SGES_PER_PAGE) {
-        sge->length = cpu_to_le32(entries * sizeof(*sge));
-        sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
-    } else {
-        sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
-        sge->type = NVME_SGL_FMT_SEG_DESC << 4;
-    }
+    sge->length = cpu_to_le32(entries * sizeof(*sge));
+    sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
 }
 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
@@ -747,34 +717,16 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
         return BLK_STS_RESOURCE;
     }
-    nvme_pci_iod_list(req)[0] = sg_list;
+    iod->list[0].sg_list = sg_list;
     iod->first_dma = sgl_dma;
     nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
     do {
-        if (i == SGES_PER_PAGE) {
-            struct nvme_sgl_desc *old_sg_desc = sg_list;
-            struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
-            sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
-            if (!sg_list)
-                goto free_sgls;
-            i = 0;
-            nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
-            sg_list[i++] = *link;
-            nvme_pci_sgl_set_seg(link, sgl_dma, entries);
-        }
         nvme_pci_sgl_set_data(&sg_list[i++], sg);
         sg = sg_next(sg);
     } while (--entries > 0);
     return BLK_STS_OK;
-free_sgls:
-    nvme_free_sgls(dev, req);
-    return BLK_STS_RESOURCE;
 }
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -856,7 +808,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
         goto out_free_sg;
     }
-    iod->use_sgl = nvme_pci_use_sgls(dev, req);
+    iod->use_sgl = nvme_pci_use_sgls(dev, req, iod->sgt.nents);
     if (iod->use_sgl)
         ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
     else
@@ -2704,11 +2656,8 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
 {
-    size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
-    size_t alloc_size = sizeof(__le64 *) * npages +
-            sizeof(struct scatterlist) * NVME_MAX_SEGS;
-    WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+    size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
     dev->iod_mempool = mempool_create_node(1,
             mempool_kmalloc, mempool_kfree,
             (void *)alloc_size, GFP_KERNEL,
@@ -3530,8 +3479,9 @@ static int __init nvme_init(void)
     BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
     BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
     BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
-    BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
-            S8_MAX);
+    BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
+    BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
+    BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
     return pci_register_driver(&nvme_driver);
 }
...
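
The PCIe rework drops the per-request void ** descriptor list (previously carved out of the same mempool allocation as the scatterlist and sized for the worst-case PRP/SGL page count) in favor of a small fixed array embedded in the iod. A union fits naturally because a request needs either chained PRP list pages or a single SGL segment, never both. A standalone sketch of that layout idea, using simplified stand-in types rather than the driver's structures:

#include <stdio.h>
#include <stdint.h>

#define MAX_NR_ALLOCATIONS 5  /* mirrors NVME_MAX_NR_ALLOCATIONS */

/* Simplified stand-ins for the real descriptor formats. */
struct sgl_desc { uint64_t addr; uint32_t length; uint8_t type; };

union descriptor {
    struct sgl_desc *sg_list;  /* at most one SGL segment per request now */
    uint64_t *prp_list;        /* chained PRP list pages */
};

/* Per-request state: the union array replaces a separately sized void ** tail. */
struct io_descriptor_state {
    int nr_allocations;
    union descriptor list[MAX_NR_ALLOCATIONS];
};

int main(void)
{
    struct io_descriptor_state iod = { .nr_allocations = 0 };
    static uint64_t prp_page[512];  /* pretend this came from a DMA pool */

    iod.list[iod.nr_allocations++].prp_list = prp_page;
    printf("allocations: %d, union slot size: %zu bytes\n",
           iod.nr_allocations, sizeof(union descriptor));
    return 0;
}
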
@@ -2282,10 +2282,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
     struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
     struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+    u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+    int qid = nvme_tcp_queue_id(req->queue);
     dev_warn(ctrl->device,
-        "queue %d: timeout request %#x type %d\n",
-        nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
+        "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
+        nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
+        opc, nvme_opcode_str(qid, opc, fctype));
     if (ctrl->state != NVME_CTRL_LIVE) {
         /*
...
@@ -840,7 +840,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
     u16 nsqr;
     u16 ncqr;
-    if (!nvmet_check_transfer_len(req, 0))
+    if (!nvmet_check_data_len_lte(req, 0))
         return;
     switch (cdw10 & 0xff) {
...
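
The target-side fix stops rejecting Set Features capsules whose data buffer is larger than the command actually needs: the host may describe a buffer bigger than the payload, so an exact-length check is too strict. A trivial sketch of the difference between the two checks; the helper bodies here are stand-ins, not the nvmet implementations (which also set the error status and complete the request):

#include <stdbool.h>
#include <stdio.h>

/* 'got' is what the host described, 'len' is what the handler requires. */
static bool check_transfer_len(size_t got, size_t len)
{
    return got == len;   /* reject anything but an exact match */
}

static bool check_data_len_lte(size_t got, size_t len)
{
    return len <= got;   /* only require that at least 'len' bytes are available */
}

int main(void)
{
    /* Set Features needs no data (len 0), but a host may attach a buffer. */
    printf("exact check, sent 16, need 0: %s\n",
           check_transfer_len(16, 0) ? "ok" : "reject");
    printf("lte check,   sent 16, need 0: %s\n",
           check_data_len_lte(16, 0) ? "ok" : "reject");
    return 0;
}
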
@@ -216,11 +216,12 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
     struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
     struct request *rq = req->p.rq;
     struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+    struct nvme_ns *ns = rq->q->queuedata;
     u32 effects;
     int status;
-    status = nvme_execute_passthru_rq(rq, &effects);
+    effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
+    status = nvme_execute_rq(rq, false);
     if (status == NVME_SC_SUCCESS &&
         req->cmd->common.opcode == nvme_admin_identify) {
         switch (req->cmd->identify.cns) {
...