Commit 4f5735f3 authored by Jens Axboe

Merge branch 'nvme-4.20' of git://git.infradead.org/nvme into for-4.20/block

Pull NVMe updates from Christoph:

"A relatively boring merge window:

 - better AEN tracing (Chaitanya)
 - NUMA aware PCIe multipathing (me)
 - RDMA workqueue fixes (Sagi)
 - better bio usage in the target (Sagi)
 - FC rework for target removal (James)
 - better multipath handling of ->queue_rq failures (James)
 - various cleanups (Milan)"

* 'nvme-4.20' of git://git.infradead.org/nvme:
  nvmet-rdma: use a private workqueue for delete
  nvme: take node locality into account when selecting a path
  nvmet: don't split large I/Os unconditionally
  nvme: call nvme_complete_rq when nvmf_check_ready fails for mpath I/O
  nvme-core: add async event trace helper
  nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  nvmet_fc: support target port removal with nvmet layer
  nvme-fc: fix for a minor typos
  nvmet: remove redundant module prefix
  nvme: fix typo in nvme_identify_ns_descs
parents 9305455a 2acf70ad
@@ -971,7 +971,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
 		uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
 		break;
 	default:
-		/* Skip unnkown types */
+		/* Skip unknown types */
 		len = cur->nidl;
 		break;
 	}
@@ -2908,9 +2908,14 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 		unsigned nsid, struct nvme_id_ns *id)
 {
 	struct nvme_ns_head *head;
+	size_t size = sizeof(*head);
 	int ret = -ENOMEM;
 
-	head = kzalloc(sizeof(*head), GFP_KERNEL);
+#ifdef CONFIG_NVME_MULTIPATH
+	size += num_possible_nodes() * sizeof(struct nvme_ns *);
+#endif
+
+	head = kzalloc(size, GFP_KERNEL);
 	if (!head)
 		goto out;
 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
@@ -3408,16 +3413,21 @@ static void nvme_fw_act_work(struct work_struct *work)
 
 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
-	switch ((result & 0xff00) >> 8) {
+	u32 aer_notice_type = (result & 0xff00) >> 8;
+
+	switch (aer_notice_type) {
 	case NVME_AER_NOTICE_NS_CHANGED:
+		trace_nvme_async_event(ctrl, aer_notice_type);
 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
 		nvme_queue_scan(ctrl);
 		break;
 	case NVME_AER_NOTICE_FW_ACT_STARTING:
+		trace_nvme_async_event(ctrl, aer_notice_type);
 		queue_work(nvme_wq, &ctrl->fw_act_work);
 		break;
 #ifdef CONFIG_NVME_MULTIPATH
 	case NVME_AER_NOTICE_ANA:
+		trace_nvme_async_event(ctrl, aer_notice_type);
 		if (!ctrl->ana_log_buf)
 			break;
 		queue_work(nvme_wq, &ctrl->ana_work);
@@ -3432,11 +3442,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		volatile union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
+	u32 aer_type = result & 0x07;
 
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
-	switch (result & 0x7) {
+	switch (aer_type) {
 	case NVME_AER_NOTICE:
 		nvme_handle_aen_notice(ctrl, result);
 		break;
@@ -3444,6 +3455,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
 	case NVME_AER_VS:
+		trace_nvme_async_event(ctrl, aer_type);
 		ctrl->aen_result = result;
 		break;
 	default:
...
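For reference, the masking above follows the NVMe completion layout for asynchronous events: bits 2:0 of the result dword carry the event type and bits 15:8 the event information (the notice subtype). A minimal userspace sketch of the same decode, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Decode an AEN result dword with the same masks used in
 * nvme_complete_async_event() and nvme_handle_aen_notice() above.
 */
static void decode_aen(uint32_t result)
{
	uint32_t aer_type = result & 0x07;		/* bits 2:0, event type */
	uint32_t aer_info = (result & 0xff00) >> 8;	/* bits 15:8, event info */

	printf("type=%#x info=%#x\n", aer_type, aer_info);
}

int main(void)
{
	decode_aen(0x0002);	/* a notice-type event, info 0 */
	return 0;
}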
@@ -552,8 +552,11 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 	    ctrl->state != NVME_CTRL_DEAD &&
 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-	return BLK_STS_IOERR;
+
+	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
+	blk_mq_start_request(rq);
+	nvme_complete_rq(rq);
+	return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
...
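With this change a fabrics transport's ->queue_rq() can return BLK_STS_OK for a fast-failed multipath request, because the helper has already started and completed the request, which routes it through nvme_failover_req() and onto another path. The caller-side pattern, paraphrased as a sketch (the example_* names are hypothetical, not from the driver):

/* Sketch of a transport's queue_rq prologue: if the queue is not live,
 * nvmf_fail_nonready_command() either tells blk-mq to retry later
 * (BLK_STS_RESOURCE) or completes the request with
 * NVME_SC_HOST_PATH_ERROR and returns BLK_STS_OK.
 */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct example_queue *queue = hctx->driver_data;	/* hypothetical */
	struct request *rq = bd->rq;

	if (!nvmf_check_ready(queue->ctrl, rq, queue->live))
		return nvmf_fail_nonready_command(queue->ctrl, rq);

	/* ... normal command setup and submission ... */
	return BLK_STS_OK;
}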
@@ -122,6 +122,7 @@ struct nvme_fc_rport {
 	struct list_head endp_list; /* for lport->endp_list */
 	struct list_head ctrl_list;
 	struct list_head ls_req_list;
+	struct list_head disc_list;
 	struct device *dev;	/* physical device for dma */
 	struct nvme_fc_lport *lport;
 	spinlock_t lock;
@@ -210,7 +211,6 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
  */
-static struct class *fc_class;
 static struct device *fc_udev_device;
 
@@ -507,6 +507,7 @@ nvme_fc_free_rport(struct kref *ref)
 	list_del(&rport->endp_list);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
 
+	WARN_ON(!list_empty(&rport->disc_list));
 	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
 
 	kfree(rport);
@@ -694,6 +695,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
 	INIT_LIST_HEAD(&newrec->endp_list);
 	INIT_LIST_HEAD(&newrec->ctrl_list);
 	INIT_LIST_HEAD(&newrec->ls_req_list);
+	INIT_LIST_HEAD(&newrec->disc_list);
 	kref_init(&newrec->ref);
 	atomic_set(&newrec->act_ctrl_cnt, 0);
 	spin_lock_init(&newrec->lock);
@@ -1385,7 +1387,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
 	__nvme_fc_finish_ls_req(lsop);
 
-	/* fc-nvme iniator doesn't care about success or failure of cmd */
+	/* fc-nvme initiator doesn't care about success or failure of cmd */
 
 	kfree(lsop);
 }
@@ -3159,7 +3161,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
 	substring_t wwn = { name, &name[sizeof(name)-1] };
 	int nnoffset, pnoffset;
 
-	/* validate it string one of the 2 allowed formats */
+	/* validate if string is one of the 2 allowed formats */
 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
 	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
 	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -3254,6 +3256,90 @@ static struct nvmf_transport_ops nvme_fc_transport = {
 	.create_ctrl	= nvme_fc_create_ctrl,
 };
 
+/* Arbitrary successive failures max. With lots of subsystems could be high */
+#define DISCOVERY_MAX_FAIL	20
+
+static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	LIST_HEAD(local_disc_list);
+	struct nvme_fc_lport *lport;
+	struct nvme_fc_rport *rport;
+	int failcnt = 0;
+
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+restart:
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+		list_for_each_entry(rport, &lport->endp_list, endp_list) {
+			if (!nvme_fc_lport_get(lport))
+				continue;
+			if (!nvme_fc_rport_get(rport)) {
+				/*
+				 * This is a temporary condition. Upon restart
+				 * this rport will be gone from the list.
+				 *
+				 * Revert the lport put and retry. Anything
+				 * added to the list already will be skipped (as
+				 * they are no longer list_empty). Loops should
+				 * resume at rports that were not yet seen.
+				 */
+				nvme_fc_lport_put(lport);
+
+				if (failcnt++ < DISCOVERY_MAX_FAIL)
+					goto restart;
+
+				pr_err("nvme_discovery: too many reference "
+				       "failures\n");
+				goto process_local_list;
+			}
+			if (list_empty(&rport->disc_list))
+				list_add_tail(&rport->disc_list,
+					      &local_disc_list);
+		}
+	}
+
+process_local_list:
+	while (!list_empty(&local_disc_list)) {
+		rport = list_first_entry(&local_disc_list,
+					 struct nvme_fc_rport, disc_list);
+		list_del_init(&rport->disc_list);
+		spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+		lport = rport->lport;
+		/* signal discovery. Won't hurt if it repeats */
+		nvme_fc_signal_discovery_scan(lport, rport);
+		nvme_fc_rport_put(rport);
+		nvme_fc_lport_put(lport);
+
+		spin_lock_irqsave(&nvme_fc_lock, flags);
+	}
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+
+static struct attribute *nvme_fc_attrs[] = {
+	&dev_attr_nvme_discovery.attr,
+	NULL
+};
+
+static struct attribute_group nvme_fc_attr_group = {
+	.attrs = nvme_fc_attrs,
+};
+
+static const struct attribute_group *nvme_fc_attr_groups[] = {
+	&nvme_fc_attr_group,
+	NULL
+};
+
+static struct class fc_class = {
+	.name = "fc",
+	.dev_groups = nvme_fc_attr_groups,
+	.owner = THIS_MODULE,
+};
+
 static int __init nvme_fc_init_module(void)
 {
 	int ret;
@@ -3272,16 +3358,16 @@ static int __init nvme_fc_init_module(void)
 	 * put in place, this code will move to a more generic
 	 * location for the class.
 	 */
-	fc_class = class_create(THIS_MODULE, "fc");
-	if (IS_ERR(fc_class)) {
+	ret = class_register(&fc_class);
+	if (ret) {
 		pr_err("couldn't register class fc\n");
-		return PTR_ERR(fc_class);
+		return ret;
 	}
 
 	/*
 	 * Create a device for the FC-centric udev events
 	 */
-	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
 			"fc_udev_device");
 	if (IS_ERR(fc_udev_device)) {
 		pr_err("couldn't create fc_udev device!\n");
@@ -3296,9 +3382,9 @@ static int __init nvme_fc_init_module(void)
 	return 0;
 
 out_destroy_device:
-	device_destroy(fc_class, MKDEV(0, 0));
+	device_destroy(&fc_class, MKDEV(0, 0));
 out_destroy_class:
-	class_destroy(fc_class);
+	class_unregister(&fc_class);
 	return ret;
 }
@@ -3313,8 +3399,8 @@ static void __exit nvme_fc_exit_module(void)
 	ida_destroy(&nvme_fc_local_port_cnt);
 	ida_destroy(&nvme_fc_ctrl_cnt);
 
-	device_destroy(fc_class, MKDEV(0, 0));
-	class_destroy(fc_class);
+	device_destroy(&fc_class, MKDEV(0, 0));
+	class_unregister(&fc_class);
 }
 
 module_init(nvme_fc_init_module);
...
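Since the class is named "fc" and the device "fc_udev_device", the new write-only attribute lands at /sys/class/fc/fc_udev_device/nvme_discovery (assuming sysfs is mounted at /sys). A minimal userspace sketch that triggers a discovery rescan of every known lport/rport pair:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* any payload works; the store routine only acts on the write event */
	int fd = open("/sys/class/fc/fc_udev_device/nvme_discovery", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}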
@@ -77,6 +77,13 @@ void nvme_failover_req(struct request *req)
 			queue_work(nvme_wq, &ns->ctrl->ana_work);
 		}
 		break;
+	case NVME_SC_HOST_PATH_ERROR:
+		/*
+		 * Temporary transport disruption in talking to the controller.
+		 * Try to send on a new path.
+		 */
+		nvme_mpath_clear_current_path(ns);
+		break;
 	default:
 		/*
 		 * Reset the controller for any non-ANA error as we don't know
@@ -110,29 +117,55 @@ static const char *nvme_ana_state_names[] = {
 	[NVME_ANA_CHANGE]		= "change",
 };
 
-static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
+void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
-	struct nvme_ns *ns, *fallback = NULL;
+	struct nvme_ns_head *head = ns->head;
+	int node;
+
+	if (!head)
+		return;
+
+	for_each_node(node) {
+		if (ns == rcu_access_pointer(head->current_path[node]))
+			rcu_assign_pointer(head->current_path[node], NULL);
+	}
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+{
+	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
+	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
 
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
 		if (ns->ctrl->state != NVME_CTRL_LIVE ||
 		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
 			continue;
+
+		distance = node_distance(node, dev_to_node(ns->ctrl->dev));
+
 		switch (ns->ana_state) {
 		case NVME_ANA_OPTIMIZED:
-			rcu_assign_pointer(head->current_path, ns);
-			return ns;
+			if (distance < found_distance) {
+				found_distance = distance;
+				found = ns;
+			}
+			break;
 		case NVME_ANA_NONOPTIMIZED:
-			fallback = ns;
+			if (distance < fallback_distance) {
+				fallback_distance = distance;
+				fallback = ns;
+			}
 			break;
 		default:
 			break;
 		}
 	}
 
-	if (fallback)
-		rcu_assign_pointer(head->current_path, fallback);
-	return fallback;
+	if (!found)
+		found = fallback;
+	if (found)
+		rcu_assign_pointer(head->current_path[node], found);
+	return found;
 }
@@ -143,10 +176,12 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 {
-	struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
+	int node = numa_node_id();
+	struct nvme_ns *ns;
 
+	ns = srcu_dereference(head->current_path[node], &head->srcu);
 	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
-		ns = __nvme_find_path(head);
+		ns = __nvme_find_path(head, node);
 	return ns;
 }
@@ -193,7 +228,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
-	ns = srcu_dereference(head->current_path, &head->srcu);
+	ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
 	if (likely(ns && nvme_path_is_optimized(ns)))
 		found = ns->queue->poll_fn(q, qc);
 	srcu_read_unlock(&head->srcu, srcu_idx);
...
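The effect of the node_distance() test above is that each NUMA node caches its nearest ANA-optimized path, falling back to the nearest non-optimized one. A toy userspace model of the same selection rule, with node_distance() stubbed to a SLIT-style 10/20 matrix:

#include <limits.h>
#include <stdio.h>

/* stub: local access costs 10, remote 20, as in a typical SLIT table */
static int node_distance(int a, int b)
{
	return a == b ? 10 : 20;
}

int main(void)
{
	int ctrl_node[2] = { 0, 1 };	/* controller i attached to node i */
	int node, i;

	for (node = 0; node < 2; node++) {
		int found = -1, found_distance = INT_MAX;

		for (i = 0; i < 2; i++) {
			int distance = node_distance(node, ctrl_node[i]);

			/* mirrors the NVME_ANA_OPTIMIZED case above */
			if (distance < found_distance) {
				found_distance = distance;
				found = i;
			}
		}
		printf("node %d -> controller %d\n", node, found);
	}
	return 0;
}

Submissions on node 0 stick to the node-0 controller and vice versa, instead of every node sharing one cached path as before.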
@@ -277,14 +277,6 @@ struct nvme_ns_ids {
  * only ever has a single entry for private namespaces.
  */
 struct nvme_ns_head {
-#ifdef CONFIG_NVME_MULTIPATH
-	struct gendisk *disk;
-	struct nvme_ns __rcu *current_path;
-	struct bio_list requeue_list;
-	spinlock_t requeue_lock;
-	struct work_struct requeue_work;
-	struct mutex lock;
-#endif
 	struct list_head list;
 	struct srcu_struct srcu;
 	struct nvme_subsystem *subsys;
@@ -293,6 +285,14 @@ struct nvme_ns_head {
 	struct list_head entry;
 	struct kref ref;
 	int instance;
+#ifdef CONFIG_NVME_MULTIPATH
+	struct gendisk *disk;
+	struct bio_list requeue_list;
+	spinlock_t requeue_lock;
+	struct work_struct requeue_work;
+	struct mutex lock;
+	struct nvme_ns __rcu *current_path[];
+#endif
 };
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -474,14 +474,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
-{
-	struct nvme_ns_head *head = ns->head;
-
-	if (head && ns == rcu_access_pointer(head->current_path))
-		rcu_assign_pointer(head->current_path, NULL);
-}
+void nvme_mpath_clear_current_path(struct nvme_ns *ns);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
...
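Note why current_path moved to the end of the struct: it is now a C99 flexible array member, one slot per possible NUMA node, which is exactly why nvme_alloc_ns_head() earlier in this diff adds num_possible_nodes() * sizeof(struct nvme_ns *) to the kzalloc() size. The allocation pattern in isolation, as a userspace sketch with stand-in types:

#include <stdlib.h>

struct path;				/* stand-in for struct nvme_ns */

struct head {
	int instance;			/* ...fixed-size fields... */
	struct path *current_path[];	/* flexible array member, must be last */
};

static struct head *alloc_head(int nr_nodes)
{
	/* one pointer slot per possible node, sized at allocation time */
	return calloc(1, sizeof(struct head) +
			 nr_nodes * sizeof(struct path *));
}

int main(void)
{
	struct head *h = alloc_head(4);
	int ok = h != NULL;

	free(h);
	return ok ? 0 : 1;
}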
@@ -156,6 +156,34 @@ TRACE_EVENT(nvme_complete_rq,
 );
 
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvme_async_event,
+	TP_PROTO(struct nvme_ctrl *ctrl, u32 result),
+	TP_ARGS(ctrl, result),
+	TP_STRUCT__entry(
+		__field(int, ctrl_id)
+		__field(u32, result)
+	),
+	TP_fast_assign(
+		__entry->ctrl_id = ctrl->instance;
+		__entry->result = result;
+	),
+	TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+		__entry->ctrl_id, __entry->result,
+		__print_symbolic(__entry->result,
+		aer_name(NVME_AER_NOTICE_NS_CHANGED),
+		aer_name(NVME_AER_NOTICE_ANA),
+		aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+		aer_name(NVME_AER_ERROR),
+		aer_name(NVME_AER_SMART),
+		aer_name(NVME_AER_CSS),
+		aer_name(NVME_AER_VS))
+	)
+);
+
+#undef aer_name
+
 #endif /* _TRACE_NVME_H */
 
 #undef TRACE_INCLUDE_PATH
...
@@ -58,7 +58,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
 	if (!ns) {
-		pr_err("nvmet : Could not find namespace id : %d\n",
+		pr_err("Could not find namespace id : %d\n",
 				le32_to_cpu(req->cmd->get_log_page.nsid));
 		return NVME_SC_INVALID_NS;
 	}
...
@@ -110,11 +110,19 @@ struct nvmet_fc_tgtport {
 	struct list_head ls_busylist;
 	struct list_head assoc_list;
 	struct ida assoc_cnt;
-	struct nvmet_port *port;
+	struct nvmet_fc_port_entry *pe;
 	struct kref ref;
 	u32 max_sg_cnt;
 };
 
+struct nvmet_fc_port_entry {
+	struct nvmet_fc_tgtport *tgtport;
+	struct nvmet_port *port;
+	u64 node_name;
+	u64 port_name;
+	struct list_head pe_list;
+};
+
 struct nvmet_fc_defer_fcp_req {
 	struct list_head req_list;
 	struct nvmefc_tgt_fcp_req *fcp_req;
@@ -132,7 +140,6 @@ struct nvmet_fc_tgt_queue {
 	atomic_t zrspcnt;
 	atomic_t rsn;
 	spinlock_t qlock;
-	struct nvmet_port *port;
 	struct nvmet_cq nvme_cq;
 	struct nvmet_sq nvme_sq;
 	struct nvmet_fc_tgt_assoc *assoc;
@@ -221,6 +228,7 @@ static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
 static LIST_HEAD(nvmet_fc_target_list);
 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+static LIST_HEAD(nvmet_fc_portentry_list);
 
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
@@ -645,7 +653,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	queue->qid = qid;
 	queue->sqsize = sqsize;
 	queue->assoc = assoc;
-	queue->port = assoc->tgtport->port;
 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
 	INIT_LIST_HEAD(&queue->fod_list);
 	INIT_LIST_HEAD(&queue->avail_defer_list);
@@ -957,6 +964,83 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 	return ret;
 }
 
+static void
+nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_port_entry *pe,
+			struct nvmet_port *port)
+{
+	lockdep_assert_held(&nvmet_fc_tgtlock);
+
+	pe->tgtport = tgtport;
+	tgtport->pe = pe;
+
+	pe->port = port;
+	port->priv = pe;
+
+	pe->node_name = tgtport->fc_target_port.node_name;
+	pe->port_name = tgtport->fc_target_port.port_name;
+	INIT_LIST_HEAD(&pe->pe_list);
+
+	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
+}
+
+static void
+nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	if (pe->tgtport)
+		pe->tgtport->pe = NULL;
+	list_del(&pe->pe_list);
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a targetport deregisters. Breaks the relationship
+ * with the nvmet port, but leaves the port_entry in place so that
+ * re-registration can resume operation.
+ */
+static void
+nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_port_entry *pe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	pe = tgtport->pe;
+	if (pe)
+		pe->tgtport = NULL;
+	tgtport->pe = NULL;
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a new targetport is registered. Looks in the
+ * existing nvmet port_entries to see if the nvmet layer is
+ * configured for the targetport's wwn's. (the targetport existed,
+ * nvmet configured, the lldd unregistered the tgtport, and is now
+ * reregistering the same targetport). If so, set the nvmet port
+ * port entry on the targetport.
+ */
+static void
+nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_port_entry *pe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
+		if (tgtport->fc_target_port.node_name == pe->node_name &&
+		    tgtport->fc_target_port.port_name == pe->port_name) {
+			WARN_ON(pe->tgtport);
+			tgtport->pe = pe;
+			pe->tgtport = tgtport;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
 /**
  * nvme_fc_register_targetport - transport entry point called by an
@@ -1034,6 +1118,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 		goto out_free_newrec;
 	}
 
+	nvmet_fc_portentry_rebind_tgt(newrec);
+
 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
@@ -1171,6 +1257,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
 {
 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
 
+	nvmet_fc_portentry_unbind_tgt(tgtport);
+
 	/* terminate any outstanding associations */
 	__nvmet_fc_free_assocs(tgtport);
@@ -2147,7 +2235,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
 /*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
+ * Actual processing routine for received FC-NVME I/O Requests from the LLD
  */
 static void
 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -2157,6 +2245,13 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
 	int ret;
 
+	/*
+	 * if there is no nvmet mapping to the targetport there
+	 * shouldn't be requests. just terminate them.
+	 */
+	if (!tgtport->pe)
+		goto transport_error;
+
 	/*
 	 * Fused commands are currently not supported in the linux
 	 * implementation.
@@ -2184,7 +2279,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	fod->req.cmd = &fod->cmdiubuf.sqe;
 	fod->req.rsp = &fod->rspiubuf.cqe;
-	fod->req.port = fod->queue->port;
+	fod->req.port = tgtport->pe->port;
 
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2468,7 +2563,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
 	substring_t wwn = { name, &name[sizeof(name)-1] };
 	int nnoffset, pnoffset;
 
-	/* validate it string one of the 2 allowed formats */
+	/* validate if string is one of the 2 allowed formats */
 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
 	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
 	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -2508,6 +2603,7 @@ static int
 nvmet_fc_add_port(struct nvmet_port *port)
 {
 	struct nvmet_fc_tgtport *tgtport;
+	struct nvmet_fc_port_entry *pe;
 	struct nvmet_fc_traddr traddr = { 0L, 0L };
 	unsigned long flags;
 	int ret;
@@ -2524,24 +2620,40 @@ nvmet_fc_add_port(struct nvmet_port *port)
 	if (ret)
 		return ret;
 
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return -ENOMEM;
+
 	ret = -ENXIO;
 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
-			tgtport->port = port;
-			ret = 0;
+			/* a FC port can only be 1 nvmet port id */
+			if (!tgtport->pe) {
+				nvmet_fc_portentry_bind(tgtport, pe, port);
+				ret = 0;
+			} else
+				ret = -EALREADY;
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	if (ret)
+		kfree(pe);
+
 	return ret;
 }
 
 static void
 nvmet_fc_remove_port(struct nvmet_port *port)
 {
-	/* nothing to do */
+	struct nvmet_fc_port_entry *pe = port->priv;
+
+	nvmet_fc_portentry_unbind(pe);
+	kfree(pe);
 }
 
 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
...
@@ -58,7 +58,7 @@ static void nvmet_bio_done(struct bio *bio)
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
 	int sg_cnt = req->sg_cnt;
-	struct bio *bio = &req->b.inline_bio;
+	struct bio *bio;
 	struct scatterlist *sg;
 	sector_t sector;
 	blk_qc_t cookie;
@@ -81,7 +81,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	sector = le64_to_cpu(req->cmd->rw.slba);
 	sector <<= (req->ns->blksize_shift - 9);
 
-	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+		bio = &req->b.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	} else {
+		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	}
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_private = req;
...
@@ -264,6 +264,7 @@ struct nvmet_fabrics_ops {
 };
 
 #define NVMET_MAX_INLINE_BIOVEC	8
+#define NVMET_MAX_INLINE_DATA_LEN	NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE
 
 struct nvmet_req {
 	struct nvme_command *cmd;
...
@@ -122,6 +122,7 @@ struct nvmet_rdma_device {
 	int inline_page_count;
 };
 
+struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1267,12 +1268,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_rdma_delete_wq);
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1337,7 +1338,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
 	}
 }
@@ -1367,7 +1368,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	schedule_work(&queue->release_work);
+	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
 }
 
 /**
@@ -1649,8 +1650,17 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
+	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvmet_rdma_delete_wq) {
+		ret = -ENOMEM;
+		goto err_unreg_transport;
+	}
+
 	return 0;
 
+err_unreg_transport:
+	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1658,6 +1668,7 @@ static int __init nvmet_rdma_init(void)
 static void __exit nvmet_rdma_exit(void)
 {
+	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
...
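The private workqueue matters for two reasons: flush_workqueue() on it waits only for pending queue-release work rather than everything ever put on the system workqueue, and WQ_MEM_RECLAIM guarantees a rescuer thread so teardown makes progress under memory pressure. The lifecycle reduced to a skeleton (an illustrative module, not the driver itself):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_delete_wq;

static void example_release(struct work_struct *work)
{
	/* teardown that must not share the system workqueue */
}
static DECLARE_WORK(example_release_work, example_release);

static int __init example_init(void)
{
	example_delete_wq = alloc_workqueue("example-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!example_delete_wq)
		return -ENOMEM;

	queue_work(example_delete_wq, &example_release_work);
	flush_workqueue(example_delete_wq);	/* waits only for our work */
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_delete_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");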
@@ -1241,6 +1241,7 @@ enum {
 	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
 	NVME_SC_ANA_INACCESSIBLE	= 0x302,
 	NVME_SC_ANA_TRANSITION		= 0x303,
+	NVME_SC_HOST_PATH_ERROR		= 0x370,
 
 	NVME_SC_DNR			= 0x4000,
 };