Commit 600abd34 authored by Jens Axboe

Merge tag 'nvme-5.14-2021-06-08' of git://git.infradead.org/nvme into for-5.14/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.14

 - improve the APST configuration algorithm (Alexey Bogoslavsky)
 - look for StorageD3Enable on companion ACPI device (Mario Limonciello)
 - allow selecting the network interface for TCP connections
   (Martin Belanger)
 - misc cleanups (Amit Engel, Chaitanya Kulkarni, Colin Ian King, me)"

* tag 'nvme-5.14-2021-06-08' of git://git.infradead.org/nvme:
  nvmet: remove a superfluous variable
  nvmet: move ka_work initialization to nvmet_alloc_ctrl
  nvme: remove nvme_{get,put}_ns_from_disk
  nvme: split nvme_report_zones
  nvme: move the CSI sanity check into nvme_ns_report_zones
  nvme: add a sparse annotation to nvme_ns_head_ctrl_ioctl
  nvme: open code nvme_put_ns_from_disk in nvme_ns_head_ctrl_ioctl
  nvme: open code nvme_{get,put}_ns_from_disk in nvme_ns_head_ioctl
  nvme: open code nvme_put_ns_from_disk in nvme_ns_head_chr_ioctl
  nvme-fabrics: remove extra braces
  nvme-fabrics: remove an extra comment
  nvme-fabrics: remove extra new lines in the switch
  nvme-fabrics: fix the kerneldoc comment for nvmf_log_connect_error()
  nvme-tcp: allow selecting the network interface for connections
  nvme-pci: look for StorageD3Enable on companion ACPI device instead
  nvme: extend and modify the APST configuration algorithm
  nvme: remove redundant initialization of variable ret
parents 81840358 346ac785
@@ -57,6 +57,26 @@ static bool force_apst;
 module_param(force_apst, bool, 0644);
 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
 
+static unsigned long apst_primary_timeout_ms = 100;
+module_param(apst_primary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_timeout_ms,
+        "primary APST timeout in ms");
+
+static unsigned long apst_secondary_timeout_ms = 2000;
+module_param(apst_secondary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_timeout_ms,
+        "secondary APST timeout in ms");
+
+static unsigned long apst_primary_latency_tol_us = 15000;
+module_param(apst_primary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_latency_tol_us,
+        "primary APST latency tolerance in us");
+
+static unsigned long apst_secondary_latency_tol_us = 100000;
+module_param(apst_secondary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_latency_tol_us,
+        "secondary APST latency tolerance in us");
+
 static bool streams;
 module_param(streams, bool, 0644);
 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
@@ -1522,36 +1542,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
-/*
- * Issue ioctl requests on the first available path. Note that unlike normal
- * block layer requests we will not retry failed request on another controller.
- */
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
-		struct nvme_ns_head **head, int *srcu_idx)
-{
-#ifdef CONFIG_NVME_MULTIPATH
-	if (disk->fops == &nvme_ns_head_ops) {
-		struct nvme_ns *ns;
-
-		*head = disk->private_data;
-		*srcu_idx = srcu_read_lock(&(*head)->srcu);
-		ns = nvme_find_path(*head);
-		if (!ns)
-			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
-		return ns;
-	}
-#endif
-	*head = NULL;
-	*srcu_idx = -1;
-	return disk->private_data;
-}
-
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
-{
-	if (head)
-		srcu_read_unlock(&head->srcu, idx);
-}
-
 static int nvme_ns_open(struct nvme_ns *ns)
 {
@@ -1948,30 +1938,46 @@ static char nvme_pr_type(enum pr_type type)
 	}
 };
 
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+		struct nvme_command *c, u8 data[16])
+{
+	struct nvme_ns_head *head = bdev->bd_disk->private_data;
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	int ret = -EWOULDBLOCK;
+
+	if (ns) {
+		c->common.nsid = cpu_to_le32(ns->head->ns_id);
+		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+		u8 data[16])
+{
+	c->common.nsid = cpu_to_le32(ns->head->ns_id);
+	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+}
+
 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 				u64 key, u64 sa_key, u8 op)
 {
-	struct nvme_ns_head *head = NULL;
-	struct nvme_ns *ns;
 	struct nvme_command c;
-	int srcu_idx, ret;
 	u8 data[16] = { 0, };
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		return -EWOULDBLOCK;
-
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = op;
-	c.common.nsid = cpu_to_le32(ns->head->ns_id);
 	c.common.cdw10 = cpu_to_le32(cdw10);
 
-	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
-	nvme_put_ns_from_disk(head, srcu_idx);
-	return ret;
+	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+	    bdev->bd_disk->fops == &nvme_ns_head_ops)
+		return nvme_send_ns_head_pr_command(bdev, &c, data);
+	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
 }
 
 static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -2053,6 +2059,17 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 EXPORT_SYMBOL_GPL(nvme_sec_submit);
 #endif /* CONFIG_BLK_SED_OPAL */
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_report_zones(struct gendisk *disk, sector_t sector,
+		unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
+			data);
+}
+#else
+#define nvme_report_zones	NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
 static const struct block_device_operations nvme_bdev_ops = {
 	.owner		= THIS_MODULE,
 	.ioctl		= nvme_ioctl,
@@ -2217,14 +2234,54 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl)
 	return ret;
 }
 
+/*
+ * The function checks whether the given total (exlat + enlat) latency of
+ * a power state allows the latter to be used as an APST transition target.
+ * It does so by comparing the latency to the primary and secondary latency
+ * tolerances defined by module params. If there's a match, the corresponding
+ * timeout value is returned and the matching tolerance index (1 or 2) is
+ * reported.
+ */
+static bool nvme_apst_get_transition_time(u64 total_latency,
+		u64 *transition_time, unsigned *last_index)
+{
+	if (total_latency <= apst_primary_latency_tol_us) {
+		if (*last_index == 1)
+			return false;
+		*last_index = 1;
+		*transition_time = apst_primary_timeout_ms;
+		return true;
+	}
+	if (apst_secondary_timeout_ms &&
+		total_latency <= apst_secondary_latency_tol_us) {
+		if (*last_index <= 2)
+			return false;
+		*last_index = 2;
+		*transition_time = apst_secondary_timeout_ms;
+		return true;
+	}
+	return false;
+}
+
 /*
  * APST (Autonomous Power State Transition) lets us program a table of power
  * state transitions that the controller will perform automatically.
- * We configure it with a simple heuristic: we are willing to spend at most 2%
- * of the time transitioning between power states. Therefore, when running in
- * any given state, we will enter the next lower-power non-operational state
- * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit
- * latency is under the requested maximum latency.
+ *
+ * Depending on module params, one of the two supported techniques will be used:
+ *
+ * - If the parameters provide explicit timeouts and tolerances, they will be
+ *   used to build a table with up to 2 non-operational states to transition to.
+ *   The default parameter values were selected based on the values used by
+ *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
+ *   regeneration of the APST table in the event of switching between external
+ *   and battery power, the timeouts and tolerances reflect a compromise
+ *   between values used by Microsoft for AC and battery scenarios.
+ * - If not, we'll configure the table with a simple heuristic: we are willing
+ *   to spend at most 2% of the time transitioning between power states.
+ *   Therefore, when running in any given state, we will enter the next
+ *   lower-power non-operational state after waiting 50 * (enlat + exlat)
+ *   microseconds, as long as that state's exit latency is under the requested
+ *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
@@ -2240,6 +2297,7 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 	int max_ps = -1;
 	int state;
 	int ret;
+	unsigned last_lt_index = UINT_MAX;
 
 	/*
	 * If APST isn't supported or if we haven't been initialized yet,
@@ -2298,13 +2356,19 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 			le32_to_cpu(ctrl->psd[state].entry_lat);
 
 		/*
-		 * This state is good. Use it as the APST idle target for
-		 * higher power states.
+		 * This state is good. It can be used as the APST idle target
+		 * for higher power states.
 		 */
-		transition_ms = total_latency_us + 19;
-		do_div(transition_ms, 20);
-		if (transition_ms > (1 << 24) - 1)
-			transition_ms = (1 << 24) - 1;
+		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
+			if (!nvme_apst_get_transition_time(total_latency_us,
+					&transition_ms, &last_lt_index))
+				continue;
+		} else {
+			transition_ms = total_latency_us + 19;
+			do_div(transition_ms, 20);
+			if (transition_ms > (1 << 24) - 1)
+				transition_ms = (1 << 24) - 1;
+		}
 
 		target = cpu_to_le64((state << 3) | (transition_ms << 8));
 		if (max_ps == -1)
@@ -4067,6 +4131,11 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
 				opts->host_traddr ?: "none");
+		if (ret)
+			return ret;
+
+		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
+				opts->host_iface ?: "none");
 	}
 
 	return ret;
 }
......
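
Note on the arithmetic in the APST heuristic above: spending at most 2% of the time in transitions means idling for 50 * (enlat + exlat) microseconds before entering a state, and since 50 us is 1/20 of a millisecond, the wait equals total_latency_us / 20 milliseconds, rounded up and capped to the 24-bit ITPT field of an APST table entry. A standalone userspace sketch of just that calculation (illustrative only, not kernel code; the function name is made up):

#include <stdint.h>
#include <stdio.h>

static uint64_t heuristic_transition_ms(uint64_t total_latency_us)
{
        /* idle for 50 * latency in us == latency / 20 in ms, rounded up */
        uint64_t ms = (total_latency_us + 19) / 20;

        if (ms > (1 << 24) - 1)         /* ITPT is a 24-bit field */
                ms = (1 << 24) - 1;
        return ms;
}

int main(void)
{
        /* enlat + exlat = 10000 us -> idle 500 ms (50 * 10 ms) first */
        printf("%llu\n", (unsigned long long)heuristic_transition_ms(10000));
        return 0;
}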
@@ -112,6 +112,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
 	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
 		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
 				(len) ? "," : "", ctrl->opts->host_traddr);
+	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
+		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
+				(len) ? "," : "", ctrl->opts->host_iface);
 	len += scnprintf(buf + len, size - len, "\n");
 
 	return len;
@@ -254,28 +257,23 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 EXPORT_SYMBOL_GPL(nvmf_reg_write32);
 
 /**
- * nvmf_log_connect_error() - Error-parsing-diagnostic print
- * out function for connect() errors.
- *
- * @ctrl: the specific /dev/nvmeX device that had the error.
- *
- * @errval: Error code to be decoded in a more human-friendly
- *	    printout.
- *
- * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
- *
- * @cmd: This is the SQE portion of a submission capsule.
- *
- * @data: This is the "Data" portion of a submission capsule.
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
+ *	connect() errors.
+ * @ctrl: The specific /dev/nvmeX device that had the error.
+ * @errval: Error code to be decoded in a more human-friendly
+ *	printout.
+ * @offset: For use with the NVMe error code
+ *	NVME_SC_CONNECT_INVALID_PARAM.
+ * @cmd: This is the SQE portion of a submission capsule.
+ * @data: This is the "Data" portion of a submission capsule.
  */
 static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 		int errval, int offset, struct nvme_command *cmd,
 		struct nvmf_connect_data *data)
 {
-	int err_sctype = errval & (~NVME_SC_DNR);
+	int err_sctype = errval & ~NVME_SC_DNR;
 
 	switch (err_sctype) {
-
 	case (NVME_SC_CONNECT_INVALID_PARAM):
 		if (offset >> 16) {
 			char *inv_data = "Connect Invalid Data Parameter";
@@ -318,30 +316,30 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 			}
 		}
 		break;
-
 	case NVME_SC_CONNECT_INVALID_HOST:
 		dev_err(ctrl->device,
 			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
 			data->subsysnqn, data->hostnqn);
 		break;
-
 	case NVME_SC_CONNECT_CTRL_BUSY:
 		dev_err(ctrl->device,
 			"Connect command failed: controller is busy or not available\n");
 		break;
-
 	case NVME_SC_CONNECT_FORMAT:
 		dev_err(ctrl->device,
 			"Connect incompatible format: %d",
 			cmd->connect.recfmt);
 		break;
-
+	case NVME_SC_HOST_PATH_ERROR:
+		dev_err(ctrl->device,
+			"Connect command failed: host path error\n");
+		break;
 	default:
 		dev_err(ctrl->device,
 			"Connect command failed, error wo/DNR bit: %d\n",
 			err_sctype);
 		break;
-	} /* switch (err_sctype) */
+	}
 }
/** /**
...@@ -545,6 +543,7 @@ static const match_table_t opt_tokens = { ...@@ -545,6 +543,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" }, { NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" }, { NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_HOST_IFACE, "host_iface=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" }, { NVMF_OPT_HOST_ID, "hostid=%s" },
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, { NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" }, { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
@@ -754,6 +753,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			kfree(opts->host_traddr);
 			opts->host_traddr = p;
 			break;
+		case NVMF_OPT_HOST_IFACE:
+			p = match_strdup(args);
+			if (!p) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			kfree(opts->host_iface);
+			opts->host_iface = p;
+			break;
 		case NVMF_OPT_HOST_ID:
 			p = match_strdup(args);
 			if (!p) {
@@ -938,6 +946,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
 	kfree(opts->trsvcid);
 	kfree(opts->subsysnqn);
 	kfree(opts->host_traddr);
+	kfree(opts->host_iface);
 	kfree(opts);
 }
 EXPORT_SYMBOL_GPL(nvmf_free_options);
......
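
For reference, the new host_iface token travels the same path as the existing options: userspace (typically nvme-cli) writes one comma-separated option string to /dev/nvme-fabrics and nvmf_parse_options() matches it against opt_tokens above. A minimal userspace sketch; the address, NQN, and interface name are made-up placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* token syntax per opt_tokens: "host_iface=%s" and friends */
        const char *opts = "transport=tcp,traddr=192.168.1.10,trsvcid=4420,"
                           "nqn=nqn.2014-08.org.example:subsys1,host_iface=eth0";
        int fd = open("/dev/nvme-fabrics", O_RDWR);

        if (fd < 0) {
                perror("open /dev/nvme-fabrics");
                return 1;
        }
        if (write(fd, opts, strlen(opts)) < 0)
                perror("connect");
        close(fd);
        return 0;
}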
@@ -66,6 +66,7 @@ enum {
 	NVMF_OPT_NR_POLL_QUEUES	= 1 << 18,
 	NVMF_OPT_TOS		= 1 << 19,
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
+	NVMF_OPT_HOST_IFACE	= 1 << 21,
 };
 
 /**
@@ -83,7 +84,9 @@ enum {
  * @trsvcid:	The transport-specific TRSVCID field for a port on the
  *		subsystem which is adding a controller.
  * @host_traddr: A transport-specific field identifying the NVME host port
  *		to use for the connection to the controller.
+ * @host_iface: A transport-specific field identifying the NVME host
+ *		interface to use for the connection to the controller.
  * @queue_size:	Number of IO queue elements.
  * @nr_io_queues: Number of controller IO queues that will be established.
  * @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -108,6 +111,7 @@ struct nvmf_ctrl_options {
 	char *traddr;
 	char *trsvcid;
 	char *host_traddr;
+	char *host_iface;
 	size_t queue_size;
 	unsigned int nr_io_queues;
 	unsigned int reconnect_delay;
......
@@ -372,12 +372,13 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
+	__releases(&head->srcu)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	int ret;
 
 	nvme_get_ctrl(ns->ctrl);
-	nvme_put_ns_from_disk(head, srcu_idx);
+	srcu_read_unlock(&head->srcu, srcu_idx);
 	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
 	nvme_put_ctrl(ctrl);
@@ -387,14 +388,15 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 		unsigned int cmd, unsigned long arg)
 {
-	struct nvme_ns_head *head = NULL;
+	struct nvme_ns_head *head = bdev->bd_disk->private_data;
 	void __user *argp = (void __user *)arg;
 	struct nvme_ns *ns;
-	int srcu_idx, ret;
+	int srcu_idx, ret = -EWOULDBLOCK;
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		return -EWOULDBLOCK;
+	srcu_idx = srcu_read_lock(&head->srcu);
+	ns = nvme_find_path(head);
+	if (!ns)
+		goto out_unlock;
 
 	/*
	 * Handle ioctls that apply to the controller instead of the namespace
@@ -402,12 +404,11 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
 	 * deadlock when deleting namespaces using the passthrough interface.
 	 */
 	if (is_ctrl_ioctl(cmd))
-		ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
-	else {
-		ret = nvme_ns_ioctl(ns, cmd, argp);
-		nvme_put_ns_from_disk(head, srcu_idx);
-	}
+		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
 
+	ret = nvme_ns_ioctl(ns, cmd, argp);
+out_unlock:
+	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
@@ -419,21 +420,19 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 			container_of(cdev, struct nvme_ns_head, cdev);
 	void __user *argp = (void __user *)arg;
 	struct nvme_ns *ns;
-	int srcu_idx, ret;
+	int srcu_idx, ret = -EWOULDBLOCK;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
-	if (!ns) {
-		srcu_read_unlock(&head->srcu, srcu_idx);
-		return -EWOULDBLOCK;
-	}
+	if (!ns)
+		goto out_unlock;
 
 	if (is_ctrl_ioctl(cmd))
 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
 
 	ret = nvme_ns_ioctl(ns, cmd, argp);
-	nvme_put_ns_from_disk(head, srcu_idx);
+out_unlock:
+	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
 #endif /* CONFIG_NVME_MULTIPATH */
......
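
The new __releases(&head->srcu) annotation documents, for sparse, that nvme_ns_head_ctrl_ioctl() is entered with the SRCU read lock held and returns with it released. A userspace sketch of that hand-off pattern, with a pthread rwlock standing in for SRCU (names are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value = 42;

/* Called with the read lock held; drops it before doing work that
 * must not run under the lock, as the ioctl helper above now does. */
static int consume_lock_and_work(int snapshot)
{
        pthread_rwlock_unlock(&lock);
        printf("working on %d outside the lock\n", snapshot);
        return 0;
}

int main(void)
{
        int val;

        pthread_rwlock_rdlock(&lock);
        val = shared_value;     /* read the protected data under the lock */
        return consume_lock_and_work(val);
}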
@@ -349,6 +349,25 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
 	nvme_put_ns_head(disk->private_data);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
+		unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+	struct nvme_ns_head *head = disk->private_data;
+	struct nvme_ns *ns;
+	int srcu_idx, ret = -EWOULDBLOCK;
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+	ns = nvme_find_path(head);
+	if (ns)
+		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
+#else
+#define nvme_ns_head_report_zones	NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
 const struct block_device_operations nvme_ns_head_ops = {
 	.owner		= THIS_MODULE,
 	.submit_bio	= nvme_ns_head_submit_bio,
@@ -356,7 +375,7 @@ const struct block_device_operations nvme_ns_head_ops = {
 	.release	= nvme_ns_head_release,
 	.ioctl		= nvme_ns_head_ioctl,
 	.getgeo		= nvme_getgeo,
-	.report_zones	= nvme_report_zones,
+	.report_zones	= nvme_ns_head_report_zones,
 	.pr_ops		= &nvme_pr_ops,
 };
......
@@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 void nvme_queue_scan(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
 		void *log, size_t size, u64 offset);
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
-		struct nvme_ns_head **head, int *srcu_idx);
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
 bool nvme_tryget_ns_head(struct nvme_ns_head *head);
 void nvme_put_ns_head(struct nvme_ns_head *head);
 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct pr_ops nvme_pr_ops;
 extern const struct block_device_operations nvme_ns_head_ops;
 
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 #ifdef CONFIG_NVME_MULTIPATH
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
@@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
@@ -810,17 +807,14 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+		unsigned int nr_zones, report_zones_cb cb, void *data);
 #ifdef CONFIG_BLK_DEV_ZONED
 int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
-		unsigned int nr_zones, report_zones_cb cb, void *data);
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 				       struct nvme_command *cmnd,
 				       enum nvme_zone_mgmt_action action);
 #else
-#define nvme_report_zones	NULL
-
 static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
 		struct request *req, struct nvme_command *cmnd,
 		enum nvme_zone_mgmt_action action)
......
@@ -2831,10 +2831,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 #ifdef CONFIG_ACPI
 static bool nvme_acpi_storage_d3(struct pci_dev *dev)
 {
-	struct acpi_device *adev;
-	struct pci_dev *root;
-	acpi_handle handle;
-	acpi_status status;
+	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 	u8 val;
 
 	/*
@@ -2842,28 +2839,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
	 * must use D3 to support deep platform power savings during
	 * suspend-to-idle.
	 */
-	root = pcie_find_root_port(dev);
-	if (!root)
-		return false;
-
-	adev = ACPI_COMPANION(&root->dev);
 	if (!adev)
 		return false;
-
-	/*
-	 * The property is defined in the PXSX device for South complex ports
-	 * and in the PEGP device for North complex ports.
-	 */
-	status = acpi_get_handle(adev->handle, "PXSX", &handle);
-	if (ACPI_FAILURE(status)) {
-		status = acpi_get_handle(adev->handle, "PEGP", &handle);
-		if (ACPI_FAILURE(status))
-			return false;
-	}
-
-	if (acpi_bus_get_device(handle, &adev))
-		return false;
-
 	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
 			&val))
 		return false;
......
@@ -1088,7 +1088,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 {
-	int ret = -EINVAL;
+	int ret;
 	bool changed;
 
 	ret = nvme_rdma_configure_admin_queue(ctrl, new);
......
@@ -123,6 +123,7 @@ struct nvme_tcp_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 	struct sockaddr_storage addr;
 	struct sockaddr_storage src_addr;
+	struct net_device	*ndev;
 	struct nvme_ctrl	ctrl;
 
 	struct work_struct	err_work;
@@ -1455,6 +1456,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 		}
 	}
 
+	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
+		char *iface = nctrl->opts->host_iface;
+		sockptr_t optval = KERNEL_SOCKPTR(iface);
+
+		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
+				      optval, strlen(iface));
+		if (ret) {
+			dev_err(nctrl->device,
+				"failed to bind to interface %s queue %d err %d\n",
+				iface, qid, ret);
+			goto err_sock;
+		}
+	}
+
 	queue->hdr_digest = nctrl->opts->hdr_digest;
 	queue->data_digest = nctrl->opts->data_digest;
 	if (queue->hdr_digest || queue->data_digest) {
@@ -2515,6 +2530,16 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 		}
 	}
 
+	if (opts->mask & NVMF_OPT_HOST_IFACE) {
+		ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
+		if (!ctrl->ndev) {
+			pr_err("invalid interface passed: %s\n",
+			       opts->host_iface);
+			ret = -ENODEV;
+			goto out_free_ctrl;
+		}
+	}
+
 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
 		ret = -EALREADY;
 		goto out_free_ctrl;
@@ -2571,7 +2596,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS,
+			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
 	.create_ctrl	= nvme_tcp_create_ctrl,
 };
......
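
The sock_setsockopt(..., SO_BINDTODEVICE, ...) call added to nvme_tcp_alloc_queue() is the in-kernel form of a standard socket option. A minimal userspace sketch of the same binding; "eth0" is a placeholder and the option normally requires CAP_NET_RAW:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        const char *iface = "eth0";     /* placeholder interface name */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;
        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       iface, strlen(iface)) < 0)
                perror("SO_BINDTODEVICE");      /* needs CAP_NET_RAW */
        close(fd);
        return 0;
}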
@@ -171,8 +171,8 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
 	return cb(&zone, idx, data);
 }
 
-static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data)
 {
 	struct nvme_zone_report *report;
 	struct nvme_command c = { };
@@ -180,6 +180,9 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 	unsigned int nz, i;
 	size_t buflen;
 
+	if (ns->head->ids.csi != NVME_CSI_ZNS)
+		return -EINVAL;
+
 	report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
 	if (!report)
 		return -ENOMEM;
@@ -227,26 +230,6 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 	return ret;
 }
 
-int nvme_report_zones(struct gendisk *disk, sector_t sector,
-		unsigned int nr_zones, report_zones_cb cb, void *data)
-{
-	struct nvme_ns_head *head = NULL;
-	struct nvme_ns *ns;
-	int srcu_idx, ret;
-
-	ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		return -EWOULDBLOCK;
-
-	if (ns->head->ids.csi == NVME_CSI_ZNS)
-		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
-	else
-		ret = -EINVAL;
-	nvme_put_ns_from_disk(head, srcu_idx);
-
-	return ret;
-}
-
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *c, enum nvme_zone_mgmt_action action)
 {
......
@@ -412,7 +412,6 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
-	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
@@ -1352,6 +1351,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
......
@@ -174,11 +174,10 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 {
 	struct blk_integrity *bi;
 	struct bio_integrity_payload *bip;
-	struct block_device *bdev = req->ns->bdev;
 	int rc;
 	size_t resid, len;
 
-	bi = bdev_get_integrity(bdev);
+	bi = bdev_get_integrity(req->ns->bdev);
 	if (unlikely(!bi)) {
 		pr_err("Unable to locate bio_integrity\n");
 		return -ENODEV;
......