Commit 236f2552 authored by Jens Axboe

Merge tag 'nvme-6.5-2023-06-16' of git://git.infradead.org/nvme into for-6.5/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.5

 - Various cleanups all around (Irvin, Chaitanya, Christophe)
 - Better struct packing (Christophe JAILLET)
 - Reduce controller error logs for optional commands (Keith)
 - Support for >=64KiB block sizes (Daniel Gomez)
 - Fabrics fixes and code organization (Max, Chaitanya, Daniel Wagner)"

* tag 'nvme-6.5-2023-06-16' of git://git.infradead.org/nvme: (27 commits)
  nvme: forward port sysfs delete fix
  nvme: skip optional id ctrl csi if it failed
  nvme-core: use nvme_ns_head_multipath instead of ns->head->disk
  nvmet-fcloop: Do not wait on completion when unregister fails
  nvme-fabrics: open code __nvmf_host_find()
  nvme-fabrics: error out to unlock the mutex
  nvme: Increase block size variable size to 32-bit
  nvme-fcloop: no need to return from void function
  nvmet-auth: remove unnecessary break after goto
  nvmet-auth: remove some dead code
  nvme-core: remove redundant check from nvme_init_ns_head
  nvme: move sysfs code to a dedicated sysfs.c file
  nvme-fabrics: prevent overriding of existing host
  nvme-fabrics: check hostid using uuid_equal
  nvme-fabrics: unify common code in admin and io queue connect
  nvmet: reorder fields in 'struct nvmefc_fcp_req'
  nvmet: reorder fields in 'struct nvme_dhchap_queue_context'
  nvmet: reorder fields in 'struct nvmf_ctrl_options'
  nvme: reorder fields in 'struct nvme_ctrl'
  nvmet: reorder fields in 'struct nvmet_sq'
  ...
parents f0854489 1c606f7f
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
 obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
 obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
 
-nvme-core-y += core.o ioctl.o
+nvme-core-y += core.o ioctl.o sysfs.o
 nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
 nvme-core-$(CONFIG_TRACING) += trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
...
@@ -30,18 +30,18 @@ struct nvme_dhchap_queue_context {
     u32 s2;
     u16 transaction;
     u8 status;
+    u8 dhgroup_id;
     u8 hash_id;
     size_t hash_len;
-    u8 dhgroup_id;
     u8 c1[64];
     u8 c2[64];
     u8 response[64];
     u8 *host_response;
     u8 *ctrl_key;
+    int ctrl_key_len;
     u8 *host_key;
+    int host_key_len;
     u8 *sess_key;
-    int ctrl_key_len;
-    int host_key_len;
     int sess_key_len;
 };
...
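Several commits in this pull ("Better struct packing") only shuffle fields so that narrow members sit next to each other and the compiler emits fewer padding holes, as in the nvme_dhchap_queue_context hunk above. A minimal userspace sketch of the effect, using a simplified, hypothetical struct rather than the kernel one:

#include <stdio.h>
#include <stddef.h>

/* "before": a lone u8 strands 7 bytes of padding in front of the size_t */
struct before {
    unsigned char hash_id;
    size_t hash_len;
    unsigned char dhgroup_id;
    unsigned char c1[64];
};

/* "after": the two u8 fields share one word, so one hole disappears */
struct after {
    unsigned char dhgroup_id;
    unsigned char hash_id;
    size_t hash_len;
    unsigned char c1[64];
};

int main(void)
{
    printf("before: %zu bytes\n", sizeof(struct before)); /* typically 88 on x86-64 */
    printf("after:  %zu bytes\n", sizeof(struct after));  /* typically 80 on x86-64 */
    return 0;
}

Tools such as pahole report these holes directly on the real kernel structs; the exact savings depend on the ABI.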
@@ -21,35 +21,60 @@ static DEFINE_MUTEX(nvmf_hosts_mutex);
 static struct nvmf_host *nvmf_default_host;
 
-static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
 {
     struct nvmf_host *host;
 
-    list_for_each_entry(host, &nvmf_hosts, list) {
-        if (!strcmp(host->nqn, hostnqn))
-            return host;
-    }
-
+    host = kmalloc(sizeof(*host), GFP_KERNEL);
+    if (!host)
         return NULL;
+
+    kref_init(&host->ref);
+    uuid_copy(&host->id, id);
+    strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+
+    return host;
 }
 
-static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
 {
     struct nvmf_host *host;
 
     mutex_lock(&nvmf_hosts_mutex);
-    host = __nvmf_host_find(hostnqn);
-    if (host) {
+
+    /*
+     * We have defined a host as how it is perceived by the target.
+     * Therefore, we don't allow different Host NQNs with the same Host ID.
+     * Similarly, we do not allow the usage of the same Host NQN with
+     * different Host IDs. This'll maintain unambiguous host identification.
+     */
+    list_for_each_entry(host, &nvmf_hosts, list) {
+        bool same_hostnqn = !strcmp(host->nqn, hostnqn);
+        bool same_hostid = uuid_equal(&host->id, id);
+
+        if (same_hostnqn && same_hostid) {
             kref_get(&host->ref);
             goto out_unlock;
         }
+        if (same_hostnqn) {
+            pr_err("found same hostnqn %s but different hostid %pUb\n",
+                   hostnqn, id);
+            host = ERR_PTR(-EINVAL);
+            goto out_unlock;
+        }
+        if (same_hostid) {
+            pr_err("found same hostid %pUb but different hostnqn %s\n",
+                   id, hostnqn);
+            host = ERR_PTR(-EINVAL);
+            goto out_unlock;
+        }
+    }
 
-    host = kmalloc(sizeof(*host), GFP_KERNEL);
-    if (!host)
+    host = nvmf_host_alloc(hostnqn, id);
+    if (!host) {
+        host = ERR_PTR(-ENOMEM);
         goto out_unlock;
-
-    kref_init(&host->ref);
-    strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+    }
 
     list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -60,16 +85,17 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 static struct nvmf_host *nvmf_host_default(void)
 {
     struct nvmf_host *host;
+    char nqn[NVMF_NQN_SIZE];
+    uuid_t id;
 
-    host = kmalloc(sizeof(*host), GFP_KERNEL);
+    uuid_gen(&id);
+    snprintf(nqn, NVMF_NQN_SIZE,
+        "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+
+    host = nvmf_host_alloc(nqn, &id);
     if (!host)
         return NULL;
 
-    kref_init(&host->ref);
-    uuid_gen(&host->id);
-    snprintf(host->nqn, NVMF_NQN_SIZE,
-        "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
-
     mutex_lock(&nvmf_hosts_mutex);
     list_add_tail(&host->list, &nvmf_hosts);
     mutex_unlock(&nvmf_hosts_mutex);
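The reworked nvmf_host_add() above enforces a single identity rule: an existing host entry is reused only when both the host NQN and the host ID match, and a partial match (same NQN with a different ID, or same ID with a different NQN) is rejected with -EINVAL. A small userspace sketch of that rule follows; nvmf_host_entry and host_match() are made-up names for illustration, not kernel code:

#include <stdio.h>
#include <string.h>

struct nvmf_host_entry {
    char nqn[224];  /* NVMF_NQN_SIZE-sized in the kernel */
    char id[37];    /* textual UUID, for simplicity */
};

/* 0: reuse existing entry, -1: reject (EINVAL), 1: no conflict, add a new one */
static int host_match(const struct nvmf_host_entry *e, const char *nqn, const char *id)
{
    int same_nqn = !strcmp(e->nqn, nqn);
    int same_id  = !strcmp(e->id, id);

    if (same_nqn && same_id)
        return 0;
    if (same_nqn || same_id)
        return -1;
    return 1;
}

int main(void)
{
    struct nvmf_host_entry e = {
        "nqn.2014-08.org.nvmexpress:uuid:host-a",
        "11111111-2222-3333-4444-555555555555"
    };

    printf("%d\n", host_match(&e, e.nqn, e.id));  /* 0: full match, reuse */
    printf("%d\n", host_match(&e, e.nqn,
        "99999999-2222-3333-4444-555555555555")); /* -1: same NQN, new ID */
    printf("%d\n", host_match(&e,
        "nqn.2014-08.org.nvmexpress:uuid:host-b", e.id)); /* -1: same ID, new NQN */
    return 0;
}

The default host created by nvmf_host_default() goes through the same nvmf_host_alloc() path, so user-supplied and default identities are handled uniformly.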
@@ -349,6 +375,45 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
     }
 }
 
+static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
+        u16 cntlid)
+{
+    struct nvmf_connect_data *data;
+
+    data = kzalloc(sizeof(*data), GFP_KERNEL);
+    if (!data)
+        return NULL;
+
+    uuid_copy(&data->hostid, &ctrl->opts->host->id);
+    data->cntlid = cpu_to_le16(cntlid);
+    strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+    strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+    return data;
+}
+
+static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
+        struct nvme_command *cmd)
+{
+    cmd->connect.opcode = nvme_fabrics_command;
+    cmd->connect.fctype = nvme_fabrics_type_connect;
+    cmd->connect.qid = cpu_to_le16(qid);
+
+    if (qid) {
+        cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+    } else {
+        cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+        /*
+         * set keep-alive timeout in seconds granularity (ms * 1000)
+         */
+        cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+    }
+
+    if (ctrl->opts->disable_sqflow)
+        cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
 /**
  * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
  *     API function.
@@ -377,28 +442,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
     int ret;
     u32 result;
 
-    cmd.connect.opcode = nvme_fabrics_command;
-    cmd.connect.fctype = nvme_fabrics_type_connect;
-    cmd.connect.qid = 0;
-    cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
-
-    /*
-     * Set keep-alive timeout in seconds granularity (ms * 1000)
-     */
-    cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
-
-    if (ctrl->opts->disable_sqflow)
-        cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+    nvmf_connect_cmd_prep(ctrl, 0, &cmd);
 
-    data = kzalloc(sizeof(*data), GFP_KERNEL);
+    data = nvmf_connect_data_prep(ctrl, 0xffff);
     if (!data)
         return -ENOMEM;
 
-    uuid_copy(&data->hostid, &ctrl->opts->host->id);
-    data->cntlid = cpu_to_le16(0xffff);
-    strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-    strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
     ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
             data, sizeof(*data), NVME_QID_ANY, 1,
             BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -468,23 +517,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
     int ret;
     u32 result;
 
-    cmd.connect.opcode = nvme_fabrics_command;
-    cmd.connect.fctype = nvme_fabrics_type_connect;
-    cmd.connect.qid = cpu_to_le16(qid);
-    cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
-
-    if (ctrl->opts->disable_sqflow)
-        cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+    nvmf_connect_cmd_prep(ctrl, qid, &cmd);
 
-    data = kzalloc(sizeof(*data), GFP_KERNEL);
+    data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
     if (!data)
         return -ENOMEM;
 
-    uuid_copy(&data->hostid, &ctrl->opts->host->id);
-    data->cntlid = cpu_to_le16(ctrl->cntlid);
-    strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
-    strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
     ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
             data, sizeof(*data), qid, 1,
             BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -621,6 +659,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
     size_t nqnlen = 0;
     int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
     uuid_t hostid;
+    char hostnqn[NVMF_NQN_SIZE];
 
     /* Set defaults */
     opts->queue_size = NVMF_DEF_QUEUE_SIZE;
@@ -637,7 +676,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
     if (!options)
         return -ENOMEM;
 
-    uuid_gen(&hostid);
+    /* use default host if not given by user space */
+    uuid_copy(&hostid, &nvmf_default_host->id);
+    strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);
 
     while ((p = strsep(&o, ",\n")) != NULL) {
         if (!*p)
@@ -783,12 +824,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                 ret = -EINVAL;
                 goto out;
             }
-            opts->host = nvmf_host_add(p);
+            strscpy(hostnqn, p, NVMF_NQN_SIZE);
             kfree(p);
-            if (!opts->host) {
-                ret = -ENOMEM;
-                goto out;
-            }
             break;
         case NVMF_OPT_RECONNECT_DELAY:
             if (match_int(args, &token)) {
@@ -945,18 +982,94 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
             opts->fast_io_fail_tmo, ctrl_loss_tmo);
     }
 
-    if (!opts->host) {
-        kref_get(&nvmf_default_host->ref);
-        opts->host = nvmf_default_host;
+    opts->host = nvmf_host_add(hostnqn, &hostid);
+    if (IS_ERR(opts->host)) {
+        ret = PTR_ERR(opts->host);
+        opts->host = NULL;
+        goto out;
     }
 
-    uuid_copy(&opts->host->id, &hostid);
-
 out:
     kfree(options);
     return ret;
 }
 
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+            u32 io_queues[HCTX_MAX_TYPES])
+{
+    if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+        /*
+         * separate read/write queues
+         * hand out dedicated default queues only after we have
+         * sufficient read queues.
+         */
+        io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+        nr_io_queues -= io_queues[HCTX_TYPE_READ];
+        io_queues[HCTX_TYPE_DEFAULT] =
+            min(opts->nr_write_queues, nr_io_queues);
+        nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+    } else {
+        /*
+         * shared read/write queues
+         * either no write queues were requested, or we don't have
+         * sufficient queue count to have dedicated default queues.
+         */
+        io_queues[HCTX_TYPE_DEFAULT] =
+            min(opts->nr_io_queues, nr_io_queues);
+        nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+    }
+
+    if (opts->nr_poll_queues && nr_io_queues) {
+        /* map dedicated poll queues only if we have queues left */
+        io_queues[HCTX_TYPE_POLL] =
+            min(opts->nr_poll_queues, nr_io_queues);
+    }
+}
+EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
+
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+             u32 io_queues[HCTX_MAX_TYPES])
+{
+    struct nvmf_ctrl_options *opts = ctrl->opts;
+
+    if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
+        /* separate read/write queues */
+        set->map[HCTX_TYPE_DEFAULT].nr_queues =
+            io_queues[HCTX_TYPE_DEFAULT];
+        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+        set->map[HCTX_TYPE_READ].nr_queues =
+            io_queues[HCTX_TYPE_READ];
+        set->map[HCTX_TYPE_READ].queue_offset =
+            io_queues[HCTX_TYPE_DEFAULT];
+    } else {
+        /* shared read/write queues */
+        set->map[HCTX_TYPE_DEFAULT].nr_queues =
+            io_queues[HCTX_TYPE_DEFAULT];
+        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+        set->map[HCTX_TYPE_READ].nr_queues =
+            io_queues[HCTX_TYPE_DEFAULT];
+        set->map[HCTX_TYPE_READ].queue_offset = 0;
+    }
+
+    blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+    blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+    if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
+        /* map dedicated poll queues only if we have queues left */
+        set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
+        set->map[HCTX_TYPE_POLL].queue_offset =
+            io_queues[HCTX_TYPE_DEFAULT] +
+            io_queues[HCTX_TYPE_READ];
+        blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+    }
+
+    dev_info(ctrl->device,
+        "mapped %d/%d/%d default/read/poll queues.\n",
+        io_queues[HCTX_TYPE_DEFAULT],
+        io_queues[HCTX_TYPE_READ],
+        io_queues[HCTX_TYPE_POLL]);
+}
+EXPORT_SYMBOL_GPL(nvmf_map_queues);
+
 static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
         unsigned int required_opts)
 {
...
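The new shared nvmf_set_io_queues() hands out dedicated read queues first, then default (write) queues if enough remain, and poll queues last, out of the count the controller actually granted. A standalone sketch of the same arithmetic with sample numbers (an illustration, not the kernel helper itself):

#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL };

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* Mirrors the split done by nvmf_set_io_queues(): read first, then write, then poll. */
static void split_io_queues(unsigned int nr_io, unsigned int nr_write,
                unsigned int nr_poll, unsigned int granted,
                unsigned int q[3])
{
    if (nr_write && nr_io < granted) {
        /* enough queues granted for a dedicated read set */
        q[TYPE_READ] = nr_io;
        granted -= q[TYPE_READ];
        q[TYPE_DEFAULT] = min_u(nr_write, granted);
        granted -= q[TYPE_DEFAULT];
    } else {
        /* not enough queues: read and write share the default set */
        q[TYPE_DEFAULT] = min_u(nr_io, granted);
        granted -= q[TYPE_DEFAULT];
    }
    if (nr_poll && granted)
        q[TYPE_POLL] = min_u(nr_poll, granted);
}

int main(void)
{
    unsigned int q[3] = { 0, 0, 0 };

    /* hypothetical options: nr_io_queues=4, nr_write_queues=2,
     * nr_poll_queues=2, controller grants 8 queues */
    split_io_queues(4, 2, 2, 8, q);
    printf("default=%u read=%u poll=%u\n",
           q[TYPE_DEFAULT], q[TYPE_READ], q[TYPE_POLL]);
    return 0;
}

With those inputs the split comes out as 2/4/2, the same order reported by the "mapped %d/%d/%d default/read/poll queues" message in nvmf_map_queues().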
@@ -77,6 +77,9 @@ enum {
  *             with the parsing opts enum.
  * @mask:      Used by the fabrics library to parse through sysfs options
  *             on adding a NVMe controller.
+ * @max_reconnects: maximum number of allowed reconnect attempts before removing
+ *             the controller, (-1) means reconnect forever, zero means remove
+ *             immediately;
  * @transport: Holds the fabric transport "technology name" (for a lack of
  *             better description) that will be used by an NVMe controller
  *             being added.
@@ -96,9 +99,6 @@ enum {
  * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
  * @kato:      Keep-alive timeout.
  * @host:      Virtual NVMe host, contains the NQN and Host ID.
- * @max_reconnects: maximum number of allowed reconnect attempts before removing
- *             the controller, (-1) means reconnect forever, zero means remove
- *             immediately;
  * @dhchap_secret: DH-HMAC-CHAP secret
  * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
  *             authentication
@@ -112,6 +112,7 @@ enum {
  */
 struct nvmf_ctrl_options {
     unsigned mask;
+    int max_reconnects;
     char *transport;
     char *subsysnqn;
     char *traddr;
@@ -125,7 +126,6 @@ struct nvmf_ctrl_options {
     bool duplicate_connect;
     unsigned int kato;
     struct nvmf_host *host;
-    int max_reconnects;
     char *dhchap_secret;
     char *dhchap_ctrl_secret;
     bool disable_sqflow;
@@ -181,7 +181,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
         ctrl->state == NVME_CTRL_DEAD ||
         strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
         strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
-        memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
+        !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
         return false;
 
     return true;
@@ -203,6 +203,13 @@ static inline void nvmf_complete_timed_out_request(struct request *rq)
     }
 }
 
+static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
+{
+    return min(opts->nr_io_queues, num_online_cpus()) +
+        min(opts->nr_write_queues, num_online_cpus()) +
+        min(opts->nr_poll_queues, num_online_cpus());
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
@@ -215,5 +222,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
         struct nvmf_ctrl_options *opts);
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+            u32 io_queues[HCTX_MAX_TYPES]);
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+            u32 io_queues[HCTX_MAX_TYPES]);
 
 #endif /* _NVME_FABRICS_H */
@@ -242,12 +242,13 @@ enum nvme_ctrl_flags {
     NVME_CTRL_ADMIN_Q_STOPPED = 1,
     NVME_CTRL_STARTED_ONCE = 2,
     NVME_CTRL_STOPPED = 3,
+    NVME_CTRL_SKIP_ID_CNS_CS = 4,
 };
 
 struct nvme_ctrl {
     bool comp_seen;
-    enum nvme_ctrl_state state;
     bool identified;
+    enum nvme_ctrl_state state;
     spinlock_t lock;
     struct mutex scan_lock;
     const struct nvme_ctrl_ops *ops;
@@ -279,8 +280,8 @@ struct nvme_ctrl {
     char name[12];
     u16 cntlid;
-    u32 ctrl_config;
     u16 mtfa;
+    u32 ctrl_config;
     u32 queue_count;
 
     u64 cap;
@@ -353,10 +354,10 @@ struct nvme_ctrl {
     bool apst_enabled;
 
     /* PCIe only: */
+    u16 hmmaxd;
     u32 hmpre;
     u32 hmmin;
     u32 hmminds;
-    u16 hmmaxd;
 
     /* Fabrics only */
     u32 ioccsz;
@@ -860,7 +861,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct pr_ops nvme_pr_ops;
 extern const struct block_device_operations nvme_ns_head_ops;
 extern const struct attribute_group nvme_dev_attrs_group;
+extern const struct attribute_group *nvme_subsys_attrs_groups[];
+extern const struct attribute_group *nvme_dev_attr_groups[];
+extern const struct block_device_operations nvme_bdev_ops;
 
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 #ifdef CONFIG_NVME_MULTIPATH
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
...
@@ -420,10 +420,9 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
         struct request *req, unsigned int hctx_idx,
         unsigned int numa_node)
 {
-    struct nvme_dev *dev = to_nvme_dev(set->driver_data);
     struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-    nvme_req(req)->ctrl = &dev->ctrl;
+    nvme_req(req)->ctrl = set->driver_data;
     nvme_req(req)->cmd = &iod->cmd;
     return 0;
 }
...
@@ -501,7 +501,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
     }
     ibdev = queue->device->dev;
 
-    /* +1 for ib_stop_cq */
+    /* +1 for ib_drain_qp */
     queue->cq_size = cq_factor * queue->queue_size + 1;
 
     ret = nvme_rdma_create_cq(ibdev, queue);
@@ -713,18 +713,10 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
     struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-    struct ib_device *ibdev = ctrl->device->dev;
-    unsigned int nr_io_queues, nr_default_queues;
-    unsigned int nr_read_queues, nr_poll_queues;
+    unsigned int nr_io_queues;
     int i, ret;
 
-    nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-                min(opts->nr_io_queues, num_online_cpus()));
-    nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
-                min(opts->nr_write_queues, num_online_cpus()));
-    nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
-    nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
+    nr_io_queues = nvmf_nr_io_queues(opts);
     ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
     if (ret)
         return ret;
@@ -739,34 +731,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
     dev_info(ctrl->ctrl.device,
         "creating %d I/O queues.\n", nr_io_queues);
 
-    if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
-        /*
-         * separate read/write queues
-         * hand out dedicated default queues only after we have
-         * sufficient read queues.
-         */
-        ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-        ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-            min(nr_default_queues, nr_io_queues);
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    } else {
-        /*
-         * shared read/write queues
-         * either no write queues were requested, or we don't have
-         * sufficient queue count to have dedicated default queues.
-         */
-        ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-            min(nr_read_queues, nr_io_queues);
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    }
-
-    if (opts->nr_poll_queues && nr_io_queues) {
-        /* map dedicated poll queues only if we have queues left */
-        ctrl->io_queues[HCTX_TYPE_POLL] =
-            min(nr_poll_queues, nr_io_queues);
-    }
+    nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
 
     for (i = 1; i < ctrl->ctrl.queue_count; i++) {
         ret = nvme_rdma_alloc_queue(ctrl, i,
                 ctrl->ctrl.sqsize + 1);
@@ -2138,44 +2103,8 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
     struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
-    struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-
-    if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-        /* separate read/write queues */
-        set->map[HCTX_TYPE_DEFAULT].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-        set->map[HCTX_TYPE_READ].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_READ];
-        set->map[HCTX_TYPE_READ].queue_offset =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    } else {
-        /* shared read/write queues */
-        set->map[HCTX_TYPE_DEFAULT].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-        set->map[HCTX_TYPE_READ].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_READ].queue_offset = 0;
-    }
-    blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-    blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-    if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-        /* map dedicated poll queues only if we have queues left */
-        set->map[HCTX_TYPE_POLL].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_POLL];
-        set->map[HCTX_TYPE_POLL].queue_offset =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-            ctrl->io_queues[HCTX_TYPE_READ];
-        blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-    }
-
-    dev_info(ctrl->ctrl.device,
-        "mapped %d/%d/%d default/read/poll queues.\n",
-        ctrl->io_queues[HCTX_TYPE_DEFAULT],
-        ctrl->io_queues[HCTX_TYPE_READ],
-        ctrl->io_queues[HCTX_TYPE_POLL]);
+    nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
...
@@ -1802,58 +1802,12 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
     return ret;
 }
 
-static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
-{
-    unsigned int nr_io_queues;
-
-    nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
-    nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
-    nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
-
-    return nr_io_queues;
-}
-
-static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
-        unsigned int nr_io_queues)
-{
-    struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-    struct nvmf_ctrl_options *opts = nctrl->opts;
-
-    if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
-        /*
-         * separate read/write queues
-         * hand out dedicated default queues only after we have
-         * sufficient read queues.
-         */
-        ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
-        ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-            min(opts->nr_write_queues, nr_io_queues);
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    } else {
-        /*
-         * shared read/write queues
-         * either no write queues were requested, or we don't have
-         * sufficient queue count to have dedicated default queues.
-         */
-        ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-            min(opts->nr_io_queues, nr_io_queues);
-        nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    }
-
-    if (opts->nr_poll_queues && nr_io_queues) {
-        /* map dedicated poll queues only if we have queues left */
-        ctrl->io_queues[HCTX_TYPE_POLL] =
-            min(opts->nr_poll_queues, nr_io_queues);
-    }
-}
-
 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
     unsigned int nr_io_queues;
     int ret;
 
-    nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+    nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
     ret = nvme_set_queue_count(ctrl, &nr_io_queues);
     if (ret)
         return ret;
@@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
     dev_info(ctrl->device,
         "creating %d I/O queues.\n", nr_io_queues);
 
-    nvme_tcp_set_io_queues(ctrl, nr_io_queues);
+    nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+               to_tcp_ctrl(ctrl)->io_queues);
 
     return __nvme_tcp_alloc_io_queues(ctrl);
 }
@@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
     struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
-    struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-
-    if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
-        /* separate read/write queues */
-        set->map[HCTX_TYPE_DEFAULT].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-        set->map[HCTX_TYPE_READ].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_READ];
-        set->map[HCTX_TYPE_READ].queue_offset =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-    } else {
-        /* shared read/write queues */
-        set->map[HCTX_TYPE_DEFAULT].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-        set->map[HCTX_TYPE_READ].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT];
-        set->map[HCTX_TYPE_READ].queue_offset = 0;
-    }
-    blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
-    blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
-    if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
-        /* map dedicated poll queues only if we have queues left */
-        set->map[HCTX_TYPE_POLL].nr_queues =
-            ctrl->io_queues[HCTX_TYPE_POLL];
-        set->map[HCTX_TYPE_POLL].queue_offset =
-            ctrl->io_queues[HCTX_TYPE_DEFAULT] +
-            ctrl->io_queues[HCTX_TYPE_READ];
-        blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
-    }
-
-    dev_info(ctrl->ctrl.device,
-        "mapped %d/%d/%d default/read/poll queues.\n",
-        ctrl->io_queues[HCTX_TYPE_DEFAULT],
-        ctrl->io_queues[HCTX_TYPE_READ],
-        ctrl->io_queues[HCTX_TYPE_POLL]);
+    nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
 }
 
 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
...
@@ -295,13 +295,11 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
             status = 0;
         }
         goto done_kfree;
-        break;
     case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
         req->sq->authenticated = true;
         pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
             __func__, ctrl->cntlid, req->sq->qid);
         goto done_kfree;
-        break;
     case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
         status = nvmet_auth_failure2(d);
         if (status) {
@@ -312,7 +310,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
             status = 0;
         }
         goto done_kfree;
-        break;
     default:
         req->sq->dhchap_status =
             NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -320,7 +317,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
             NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
         req->sq->authenticated = false;
         goto done_kfree;
-        break;
     }
 
 done_failure1:
     req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -483,15 +479,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
             status = NVME_SC_INTERNAL;
             break;
         }
-        if (status) {
-            req->sq->dhchap_status = status;
-            nvmet_auth_failure1(req, d, al);
-            pr_warn("ctrl %d qid %d: challenge status (%x)\n",
-                ctrl->cntlid, req->sq->qid,
-                req->sq->dhchap_status);
-            status = 0;
-            break;
-        }
         req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
         break;
     case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
...
@@ -645,8 +645,6 @@ fcloop_fcp_recv_work(struct work_struct *work)
     }
 
     if (ret)
         fcloop_call_host_done(fcpreq, tfcp_req, ret);
-
-    return;
 }
 
 static void
@@ -1168,6 +1166,7 @@ __wait_localport_unreg(struct fcloop_lport *lport)
 
     ret = nvme_fc_unregister_localport(lport->localport);
 
-    wait_for_completion(&lport->unreg_done);
+    if (!ret)
+        wait_for_completion(&lport->unreg_done);
 
     kfree(lport);
...
@@ -109,8 +109,8 @@ struct nvmet_sq {
     u32 sqhd;
     bool sqhd_disabled;
 #ifdef CONFIG_NVME_TARGET_AUTH
-    struct delayed_work auth_expired_work;
     bool authenticated;
+    struct delayed_work auth_expired_work;
     u16 dhchap_tid;
     u16 dhchap_status;
     int dhchap_step;
...
@@ -185,7 +185,6 @@ enum nvmefc_fcp_datadir {
  * @first_sgl: memory for 1st scatter/gather list segment for payload data
  * @sg_cnt:    number of elements in the scatter/gather list
  * @io_dir:    direction of the FCP request (see NVMEFC_FCP_xxx)
- * @sqid:      The nvme SQID the command is being issued on
  * @done:      The callback routine the LLDD is to invoke upon completion of
  *             the FCP operation. req argument is the pointer to the original
  *             FCP IO operation.
@@ -194,12 +193,13 @@ enum nvmefc_fcp_datadir {
  *             while processing the operation. The length of the buffer
  *             corresponds to the fcprqst_priv_sz value specified in the
  *             nvme_fc_port_template supplied by the LLDD.
+ * @sqid:      The nvme SQID the command is being issued on
  *
  * Values set by the LLDD indicating completion status of the FCP operation.
  * Must be set prior to calling the done() callback.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
  * @transferred_length: amount of payload data, in bytes, that were
  *             transferred. Should equal payload_length on success.
- * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
  * @status:    Completion status of the FCP operation. must be 0 upon success,
  *             negative errno value upon failure (ex: -EIO). Note: this is
  *             NOT a reflection of the NVME CQE completion status. Only the
@@ -219,14 +219,14 @@ struct nvmefc_fcp_req {
     int sg_cnt;
     enum nvmefc_fcp_datadir io_dir;
 
-    __le16 sqid;
-
     void (*done)(struct nvmefc_fcp_req *req);
 
     void *private;
 
-    u32 transferred_length;
+    __le16 sqid;
+
     u16 rcv_rsplen;
+    u32 transferred_length;
     u32 status;
 } __aligned(sizeof(u64));   /* alignment for other things alloc'd with */
...