Commit c48d8c5c authored by Jens Axboe

Merge tag 'nvme-5.18-2022-03-03' of git://git.infradead.org/nvme into for-5.18/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.18

 - add vectored-io support for user-passthrough (Kanchan Joshi)
 - add verbose error logging (Alan Adamson)
 - support buffered I/O on block devices in nvmet (Chaitanya Kulkarni)
 - central discovery controller support (Martin Belanger)
 - fix and extend the globally unique identifier validation (me)
 - move away from the deprecated IDA APIs (Sagi Grimberg)
 - misc code cleanup (Keith Busch, Max Gurtovoy, Qinghua Jin,
   Chaitanya Kulkarni)"

* tag 'nvme-5.18-2022-03-03' of git://git.infradead.org/nvme: (27 commits)
  nvme: check that EUI/GUID/UUID are globally unique
  nvme: check for duplicate identifiers earlier
  nvme: fix the check for duplicate unique identifiers
  nvme: cleanup __nvme_check_ids
  nvme: remove nssa from struct nvme_ctrl
  nvme: explicitly set non-error for directives
  nvme: expose cntrltype and dctype through sysfs
  nvme: send uevent on connection up
  nvme: add vectored-io support for user-passthrough
  nvme: add verbose error logging
  nvme: add a helper to initialize connect_q
  nvme-rdma: add helpers for mapping/unmapping request
  nvmet-tcp: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvmet-rdma: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvmet-fc: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvmet: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvme-fc: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvme: replace ida_simple[get|remove] with the simpler ida_[alloc|free]
  nvmet: allow bdev in buffered_io mode
  nvmet: use i_size_read() to set size for file-ns
  ...
parents df00b1d2 2079f41e
......@@ -24,6 +24,14 @@ config NVME_MULTIPATH
/dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
config NVME_VERBOSE_ERRORS
bool "NVMe verbose error reporting"
depends on NVME_CORE
help
This option enables verbose reporting for NVMe errors. The
error translation table will grow the kernel image size by
about 4 KB.
config NVME_HWMON
bool "NVMe hardware monitoring"
depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
......
......@@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
nvme-core-y := core.o ioctl.o
nvme-core-y := core.o ioctl.o constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
......
// SPDX-License-Identifier: GPL-2.0
/*
* NVM Express device driver verbose errors
* Copyright (c) 2022, Oracle and/or its affiliates
*/
#include <linux/blkdev.h>
#include "nvme.h"
#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
[nvme_cmd_read] = "Read",
[nvme_cmd_write_uncor] = "Write Uncorrectable",
[nvme_cmd_compare] = "Compare",
[nvme_cmd_write_zeroes] = "Write Zeros",
[nvme_cmd_dsm] = "Dataset Management",
[nvme_cmd_verify] = "Verify",
[nvme_cmd_resv_register] = "Reservation Register",
[nvme_cmd_resv_report] = "Reservation Report",
[nvme_cmd_resv_acquire] = "Reservation Acquire",
[nvme_cmd_resv_release] = "Reservation Release",
[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
[nvme_cmd_zone_append] = "Zone Management Append",
};
static const char * const nvme_admin_ops[] = {
[nvme_admin_delete_sq] = "Delete SQ",
[nvme_admin_create_sq] = "Create SQ",
[nvme_admin_get_log_page] = "Get Log Page",
[nvme_admin_delete_cq] = "Delete CQ",
[nvme_admin_create_cq] = "Create CQ",
[nvme_admin_identify] = "Identify",
[nvme_admin_abort_cmd] = "Abort Command",
[nvme_admin_set_features] = "Set Features",
[nvme_admin_get_features] = "Get Features",
[nvme_admin_async_event] = "Async Event",
[nvme_admin_ns_mgmt] = "Namespace Management",
[nvme_admin_activate_fw] = "Activate Firmware",
[nvme_admin_download_fw] = "Download Firmware",
[nvme_admin_dev_self_test] = "Device Self Test",
[nvme_admin_ns_attach] = "Namespace Attach",
[nvme_admin_keep_alive] = "Keep Alive",
[nvme_admin_directive_send] = "Directive Send",
[nvme_admin_directive_recv] = "Directive Receive",
[nvme_admin_virtual_mgmt] = "Virtual Management",
[nvme_admin_nvme_mi_send] = "NVMe Send MI",
[nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
[nvme_admin_dbbuf] = "Doorbell Buffer Config",
[nvme_admin_format_nvm] = "Format NVM",
[nvme_admin_security_send] = "Security Send",
[nvme_admin_security_recv] = "Security Receive",
[nvme_admin_sanitize_nvm] = "Sanitize NVM",
[nvme_admin_get_lba_status] = "Get LBA Status",
};
static const char * const nvme_statuses[] = {
[NVME_SC_SUCCESS] = "Success",
[NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
[NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
[NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
[NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
[NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
[NVME_SC_INTERNAL] = "Internal Error",
[NVME_SC_ABORT_REQ] = "Command Abort Requested",
[NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
[NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
[NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
[NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
[NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
[NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
[NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
[NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
[NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
[NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
[NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
[NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
[NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
[NVME_SC_OP_DENIED] = "Operation Denied",
[NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
[NVME_SC_RESERVED] = "Reserved",
[NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
[NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
[NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
[NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
[NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
[NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
[NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
[NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
[NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
[NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
[NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
[NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
[NVME_SC_LBA_RANGE] = "LBA Out of Range",
[NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
[NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
[NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
[NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
[NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
[NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
[NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
[NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
[NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
[NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
[NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
[NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
[NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
[NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
[NVME_SC_INVALID_FORMAT] = "Invalid Format",
[NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
[NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
[NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
[NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
[NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
[NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
[NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
[NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
[NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
[NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
[NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
[NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
[NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
[NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
[NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
[NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
[NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
[NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
[NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
[NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
[NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
[NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
[NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
[NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
[NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
[NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
[NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
[NVME_SC_INVALID_PI] = "Invalid Protection Information",
[NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
[NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
[NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
[NVME_SC_ZONE_FULL] = "Zone Is Full",
[NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
[NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
[NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
[NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
[NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
[NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
[NVME_SC_WRITE_FAULT] = "Write Fault",
[NVME_SC_READ_ERROR] = "Unrecovered Read Error",
[NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
[NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
[NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
[NVME_SC_COMPARE_FAILED] = "Compare Failure",
[NVME_SC_ACCESS_DENIED] = "Access Denied",
[NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
[NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
[NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
[NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
[NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
};
const unsigned char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
return nvme_statuses[status & 0x7ff];
return "Unknown";
}
const unsigned char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
return "Unknown";
}
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
return "Unknown";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
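For orientation, here is a minimal sketch of how the string helpers above might be wired into completion-path logging for I/O commands. It is not the core.c hunk from this series (that diff is not reproduced here); the function name and the nvme_request fields used are assumptions for illustration only.

/* Hedged sketch: log a failed I/O command using the string helpers above. */
static void nvme_log_failed_cmd(struct request *req)
{
	struct nvme_request *nr = nvme_req(req);

	pr_err("nvme: %s command failed: %s (status 0x%x)\n",
	       nvme_get_opcode_str(nr->cmd->common.opcode),
	       nvme_get_error_status_str(nr->status),
	       nr->status);
}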
......@@ -144,11 +144,10 @@ EXPORT_SYMBOL_GPL(nvmf_get_address);
*/
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
struct nvme_command cmd;
struct nvme_command cmd = { };
union nvme_result res;
int ret;
memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
......@@ -272,7 +271,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int err_sctype = errval & ~NVME_SC_DNR;
switch (err_sctype) {
case (NVME_SC_CONNECT_INVALID_PARAM):
case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
......@@ -873,7 +872,7 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts)
{
if ((opts->mask & required_opts) != required_opts) {
int i;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & required_opts) &&
......@@ -923,7 +922,7 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
if (opts->mask & ~allowed_opts) {
int i;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & opts->mask) &&
......
......@@ -259,7 +259,7 @@ nvme_fc_free_lport(struct kref *ref)
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
put_device(lport->dev);
......@@ -399,7 +399,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed;
}
idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
......@@ -439,7 +439,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
return 0;
out_ida_put:
ida_simple_remove(&nvme_fc_local_port_cnt, idx);
ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_reghost_failed:
......@@ -535,7 +535,7 @@ nvme_fc_free_rport(struct kref *ref)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
WARN_ON(!list_empty(&rport->disc_list));
ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
ida_free(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
......@@ -713,7 +713,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
goto out_lport_put;
}
idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_kfree_rport;
......@@ -2393,7 +2393,7 @@ nvme_fc_ctrl_free(struct kref *ref)
put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
......@@ -2916,11 +2916,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.tagset = &ctrl->tag_set;
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
ret = PTR_ERR(ctrl->ctrl.connect_q);
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
goto out_free_tag_set;
}
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
......@@ -3472,7 +3470,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_free_ctrl;
......@@ -3635,7 +3633,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
kfree(ctrl->queues);
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
kfree(ctrl);
out_fail:
......
......@@ -56,7 +56,7 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout)
u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
......@@ -75,8 +75,22 @@ static int nvme_submit_user_cmd(struct request_queue *q,
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
if (!vec)
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
GFP_KERNEL);
else {
struct iovec fast_iov[UIO_FASTIOV];
struct iovec *iov = fast_iov;
struct iov_iter iter;
ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
UIO_FASTIOV, &iov, &iter);
if (ret < 0)
goto out;
ret = blk_rq_map_user_iov(q, req, NULL, &iter,
GFP_KERNEL);
kfree(iov);
}
if (ret)
goto out;
bio = req->bio;
......@@ -170,7 +184,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c,
nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
......@@ -224,7 +239,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout);
0, &result, timeout, false);
if (status >= 0) {
if (put_user(result, &ucmd->result))
......@@ -235,7 +250,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd)
struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
......@@ -270,7 +285,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
0, &cmd.result, timeout, vec);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
......@@ -296,7 +311,7 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
return nvme_user_cmd64(ctrl, NULL, argp, false);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
......@@ -340,7 +355,9 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD:
return nvme_user_cmd64(ns->ctrl, ns, argp);
return nvme_user_cmd64(ns->ctrl, ns, argp, false);
case NVME_IOCTL_IO64_CMD_VEC:
return nvme_user_cmd64(ns->ctrl, ns, argp, true);
default:
return -ENOTTY;
}
......@@ -480,7 +497,7 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_ADMIN64_CMD:
return nvme_user_cmd64(ctrl, NULL, argp);
return nvme_user_cmd64(ctrl, NULL, argp, false);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
......
......@@ -280,7 +280,6 @@ struct nvme_ctrl {
u16 crdt[3];
u16 oncs;
u16 oacs;
u16 nssa;
u16 nr_streams;
u16 sqsize;
u32 max_namespaces;
......@@ -349,6 +348,9 @@ struct nvme_ctrl {
unsigned long discard_page_busy;
struct nvme_fault_inject fault_inject;
enum nvme_ctrl_type cntrltype;
enum nvme_dctype dctype;
};
enum nvme_iopolicy {
......@@ -894,6 +896,14 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
}
#endif
static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
{
ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ctrl->connect_q))
return PTR_ERR(ctrl->connect_q);
return 0;
}
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
......@@ -930,4 +940,23 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
#endif /* _NVME_H */
......@@ -978,12 +978,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
ret = PTR_ERR(ctrl->ctrl.connect_q);
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
goto out_free_tag_set;
}
}
ret = nvme_rdma_start_io_queues(ctrl);
if (ret)
......@@ -1282,6 +1280,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL);
}
static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
if (blk_integrity_rq(rq)) {
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
}
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
......@@ -1293,13 +1307,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return;
if (blk_integrity_rq(rq)) {
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
}
if (req->use_sig_mr)
pool = &queue->qp->sig_mrs;
......@@ -1308,9 +1315,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL;
}
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
nvme_rdma_dma_unmap_req(ibdev, rq);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
......@@ -1521,22 +1526,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
return -EINVAL;
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
int *count, int *pi_count)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
int pi_count = 0;
int count, ret;
req->num_sge = 1;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
int ret;
req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
......@@ -1548,9 +1542,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
req->data_sgl.sg_table.sgl);
count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
*count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
req->data_sgl.nents, rq_dma_dir(rq));
if (unlikely(count <= 0)) {
if (unlikely(*count <= 0)) {
ret = -EIO;
goto out_free_table;
}
......@@ -1569,16 +1563,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
rq->bio, req->metadata_sgl->sg_table.sgl);
pi_count = ib_dma_map_sg(ibdev,
*pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents,
rq_dma_dir(rq));
if (unlikely(pi_count <= 0)) {
if (unlikely(*pi_count <= 0)) {
ret = -EIO;
goto out_free_pi_table;
}
}
return 0;
out_free_pi_table:
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
out_free_table:
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
return ret;
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
int pi_count = 0;
int count, ret;
req->num_sge = 1;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
if (unlikely(ret))
return ret;
if (req->use_sig_mr) {
ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
goto out;
......@@ -1602,23 +1630,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
goto out_unmap_pi_sg;
goto out_dma_unmap_req;
return 0;
out_unmap_pi_sg:
if (blk_integrity_rq(rq))
ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents, rq_dma_dir(rq));
out_free_pi_table:
if (blk_integrity_rq(rq))
sg_free_table_chained(&req->metadata_sgl->sg_table,
NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
rq_dma_dir(rq));
out_free_table:
sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
out_dma_unmap_req:
nvme_rdma_dma_unmap_req(ibdev, rq);
return ret;
}
......
......@@ -1825,12 +1825,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
ret = nvme_ctrl_init_connect_q(ctrl);
if (ret)
goto out_free_tag_set;
}
}
ret = nvme_tcp_start_io_queues(ctrl);
if (ret)
......
......@@ -1400,7 +1400,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (subsys->cntlid_min > subsys->cntlid_max)
goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
......@@ -1459,7 +1459,7 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
......
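Since several files in this pull perform the same ida_simple_* to ida_* conversion, a short sketch of the mapping may help reviewers. One point worth checking at range call sites such as the cntlid allocation above: ida_simple_get() treats its end argument as exclusive, while ida_alloc_range() takes an inclusive max. The function below is illustrative only.

/* Sketch of the IDA API mapping applied throughout this series. */
#include <linux/idr.h>

static int example_ida_usage(struct ida *ida)
{
	int id;

	/* was: id = ida_simple_get(ida, 0, 0, GFP_KERNEL); */
	id = ida_alloc(ida, GFP_KERNEL);
	if (id < 0)
		return id;

	/* ... use id ... */

	/* was: ida_simple_remove(ida, id); */
	ida_free(ida, id);
	return 0;
}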
......@@ -1115,7 +1115,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (!assoc)
return NULL;
idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
if (idx < 0)
goto out_free_assoc;
......@@ -1157,7 +1157,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
out_put:
nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_simple_remove(&tgtport->assoc_cnt, idx);
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
return NULL;
......@@ -1183,7 +1183,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* if pending Rcv Disconnect Association LS, send rsp now */
if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
ida_free(&tgtport->assoc_cnt, assoc->a_id);
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
......@@ -1383,7 +1383,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_regtgt_failed;
}
idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
......@@ -1433,7 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
out_free_newrec:
put_device(dev);
out_ida_put:
ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_regtgt_failed:
......@@ -1460,7 +1460,7 @@ nvmet_fc_free_tgtport(struct kref *ref)
/* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port);
ida_simple_remove(&nvmet_fc_tgtport_cnt,
ida_free(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt);
......
......@@ -76,6 +76,14 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
/*
* When the buffered_io namespace attribute is enabled, the user wants
* this block device to be accessed as a file, so the block device can
* take advantage of the page cache.
*/
if (ns->buffered_io)
return -ENOTBLK;
ns->bdev = blkdev_get_by_path(ns->device_path,
FMODE_READ | FMODE_WRITE, NULL);
if (IS_ERR(ns->bdev)) {
......
......@@ -14,16 +14,9 @@
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
struct kstat stat;
int ret;
ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
AT_STATX_FORCE_SYNC);
if (!ret)
ns->size = stat.size;
return ret;
ns->size = i_size_read(ns->file->f_mapping->host);
}
void nvmet_file_ns_disable(struct nvmet_ns *ns)
......@@ -43,7 +36,7 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
int ret;
int ret = 0;
if (!ns->buffered_io)
flags |= O_DIRECT;
......@@ -57,9 +50,7 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return ret;
}
ret = nvmet_file_ns_revalidate(ns);
if (ret)
goto err;
nvmet_file_ns_revalidate(ns);
/*
* i_blkbits can be greater than the universally accepted upper bound,
......
......@@ -543,11 +543,9 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
goto out_destroy_queues;
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
ret = PTR_ERR(ctrl->ctrl.connect_q);
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
goto out_free_tagset;
}
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
......
......@@ -541,7 +541,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
......
......@@ -1356,7 +1356,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
!queue->host_qid);
}
nvmet_rdma_free_rsps(queue);
ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
ida_free(&nvmet_rdma_queue_ida, queue->idx);
kfree(queue);
}
......@@ -1459,7 +1459,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list);
queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_destroy_sq;
......@@ -1510,7 +1510,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
out_free_responses:
nvmet_rdma_free_rsps(queue);
out_ida_remove:
ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
......
......@@ -1473,7 +1473,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
page = virt_to_head_page(queue->pf_cache.va);
__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
......@@ -1613,7 +1613,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list);
queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = queue->idx;
goto out_free_queue;
......@@ -1646,7 +1646,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
out_free_connect:
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
kfree(queue);
return ret;
......
......@@ -721,7 +721,7 @@ enum {
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
* @port_num: NVME-FC transport subsytem port number
* @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
......
......@@ -43,6 +43,12 @@ enum nvme_ctrl_type {
NVME_CTRL_ADMIN = 3, /* Administrative controller */
};
enum nvme_dctype {
NVME_DCTYPE_NOT_REPORTED = 0,
NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
......@@ -320,7 +326,9 @@ struct nvme_id_ctrl {
__le16 icdoff;
__u8 ctrattr;
__u8 msdbd;
__u8 rsvd1804[244];
__u8 rsvd1804[2];
__u8 dctype;
__u8 rsvd1807[241];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
......@@ -1636,6 +1644,7 @@ enum {
NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
NVME_SC_MORE = 0x2000,
NVME_SC_DNR = 0x4000,
};
......
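The series exposes the cntrltype and dctype values added above through sysfs ("nvme: expose cntrltype and dctype through sysfs" in the commit list), but that core.c hunk is not shown here. The following is only a hedged sketch of what such a read-only attribute could look like; the attribute plumbing and naming are assumptions rather than taken from the patch.

/* Hypothetical sysfs show callback for the discovery controller type. */
static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	switch (ctrl->dctype) {
	case NVME_DCTYPE_DDC:
		return sysfs_emit(buf, "ddc\n");
	case NVME_DCTYPE_CDC:
		return sysfs_emit(buf, "cdc\n");
	default:
		return sysfs_emit(buf, "none\n");
	}
}
static DEVICE_ATTR_RO(dctype);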
......@@ -55,7 +55,10 @@ struct nvme_passthru_cmd64 {
__u64 metadata;
__u64 addr;
__u32 metadata_len;
__u32 data_len;
union {
__u32 data_len; /* for non-vectored io */
__u32 vec_cnt; /* for vectored io */
};
__u32 cdw10;
__u32 cdw11;
__u32 cdw12;
......@@ -78,5 +81,6 @@ struct nvme_passthru_cmd64 {
#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
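To make the new vectored passthrough path concrete, below is a hedged userspace sketch that drives NVME_IOCTL_IO64_CMD_VEC. Only the ioctl number and the addr/vec_cnt encoding come from the uapi change above; the device path, namespace ID, LBA size and command dwords are illustrative assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

static int read_two_segments(void)
{
	char a[4096], b[4096];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct nvme_passthru_cmd64 cmd = {
		.opcode  = 0x02,		/* NVMe Read */
		.nsid    = 1,			/* assumed namespace */
		.addr    = (uintptr_t)iov,	/* pointer to the iovec array */
		.vec_cnt = 2,			/* number of iovecs, not bytes */
		.cdw10   = 0,			/* starting LBA, low 32 bits */
		.cdw11   = 0,			/* starting LBA, high 32 bits */
		.cdw12   = 1,			/* NLB - 1: two LBAs, assuming a 4K LBA size */
	};
	int fd = open("/dev/nvme0n1", O_RDONLY), ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd);
	close(fd);
	return ret;
}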