Commit 9b9c63f7 authored by Jens Axboe

Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block

Pull NVMe fixes for 4.16 from Christoph.

* 'nvme-4.16' of git://git.infradead.org/nvme:
  nvme-pci: clean up SMBSZ bit definitions
  nvme-pci: clean up CMB initialization
  nvme-fc: correct hang in nvme_ns_remove()
  nvme-fc: fix rogue admin cmds stalling teardown
  nvmet: release a ns reference in nvmet_req_uninit if needed
  nvme-fabrics: fix memory leak when parsing host ID option
  nvme: fix comment typos in nvme_create_io_queues
  nvme: host delete_work and reset_work on separate workqueues
  nvme-pci: allocate device queues storage space at probe
  nvme-pci: serialize pci resets
parents b889bf66 88de4598
...@@ -65,9 +65,26 @@ static bool streams; ...@@ -65,9 +65,26 @@ static bool streams;
module_param(streams, bool, 0644); module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
/*
* nvme_wq - hosts nvme related works that are not reset or delete
* nvme_reset_wq - hosts nvme reset works
* nvme_delete_wq - hosts nvme delete works
*
* nvme_wq will host works such are scan, aen handling, fw activation,
* keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
* runs reset works which also flush works hosted on nvme_wq for
* serialization purposes. nvme_delete_wq host controller deletion
* works which flush reset works for serialization.
*/
struct workqueue_struct *nvme_wq; struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq); EXPORT_SYMBOL_GPL(nvme_wq);
struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);
struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static DEFINE_IDA(nvme_subsystems_ida); static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems); static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_MUTEX(nvme_subsystems_lock);
...@@ -89,13 +106,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl) ...@@ -89,13 +106,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{ {
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY; return -EBUSY;
if (!queue_work(nvme_wq, &ctrl->reset_work)) if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY; return -EBUSY;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(nvme_reset_ctrl); EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{ {
int ret; int ret;
...@@ -104,6 +121,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) ...@@ -104,6 +121,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work); flush_work(&ctrl->reset_work);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work) static void nvme_delete_ctrl_work(struct work_struct *work)
{ {
...@@ -122,7 +140,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl) ...@@ -122,7 +140,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{ {
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY; return -EBUSY;
if (!queue_work(nvme_wq, &ctrl->delete_work)) if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY; return -EBUSY;
return 0; return 0;
} }
...@@ -3525,16 +3543,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset); ...@@ -3525,16 +3543,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void) int __init nvme_core_init(void)
{ {
int result; int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq", nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq) if (!nvme_wq)
return -ENOMEM; goto out;
nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_reset_wq)
goto destroy_wq;
nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_delete_wq)
goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0) if (result < 0)
goto destroy_wq; goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme"); nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) { if (IS_ERR(nvme_class)) {
...@@ -3553,8 +3581,13 @@ int __init nvme_core_init(void) ...@@ -3553,8 +3581,13 @@ int __init nvme_core_init(void)
class_destroy(nvme_class); class_destroy(nvme_class);
unregister_chrdev: unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
destroy_workqueue(nvme_reset_wq);
destroy_wq: destroy_wq:
destroy_workqueue(nvme_wq); destroy_workqueue(nvme_wq);
out:
return result; return result;
} }
...@@ -3564,6 +3597,8 @@ void nvme_core_exit(void) ...@@ -3564,6 +3597,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class); class_destroy(nvme_subsys_class);
class_destroy(nvme_class); class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq); destroy_workqueue(nvme_wq);
} }
......
...@@ -738,7 +738,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ...@@ -738,7 +738,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
if (uuid_parse(p, &hostid)) { ret = uuid_parse(p, &hostid);
kfree(p);
if (ret) {
pr_err("Invalid hostid %s\n", p); pr_err("Invalid hostid %s\n", p);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
......
...@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) ...@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]); nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_fc_ctlr_inactive_on_rport(ctrl); nvme_fc_ctlr_inactive_on_rport(ctrl);
} }
...@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) ...@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate * waiting for io to terminate
*/ */
nvme_fc_delete_association(ctrl); nvme_fc_delete_association(ctrl);
/* resume the io queues so that things will fast fail */
nvme_start_queues(nctrl);
} }
static void static void
......
...@@ -32,6 +32,8 @@ extern unsigned int admin_timeout; ...@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10 #define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq; extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
enum { enum {
NVME_NS_LBA = 0, NVME_NS_LBA = 0,
...@@ -394,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); ...@@ -394,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl); void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
......
This diff is collapsed.
...@@ -2029,7 +2029,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data) ...@@ -2029,7 +2029,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
} }
mutex_unlock(&nvme_rdma_ctrl_mutex); mutex_unlock(&nvme_rdma_ctrl_mutex);
flush_workqueue(nvme_wq); flush_workqueue(nvme_delete_wq);
} }
static struct ib_client nvme_rdma_ib_client = { static struct ib_client nvme_rdma_ib_client = {
......
...@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, ...@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0; req->sg_cnt = 0;
req->transfer_len = 0; req->transfer_len = 0;
req->rsp->status = 0; req->rsp->status = 0;
req->ns = NULL;
/* no support for fused commands yet */ /* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
...@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init); ...@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req) void nvmet_req_uninit(struct nvmet_req *req)
{ {
percpu_ref_put(&req->sq->ref); percpu_ref_put(&req->sq->ref);
if (req->ns)
nvmet_put_namespace(req->ns);
} }
EXPORT_SYMBOL_GPL(nvmet_req_uninit); EXPORT_SYMBOL_GPL(nvmet_req_uninit);
......
...@@ -717,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void) ...@@ -717,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl); nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex); mutex_unlock(&nvme_loop_ctrl_mutex);
flush_workqueue(nvme_wq); flush_workqueue(nvme_delete_wq);
} }
module_init(nvme_loop_init_module); module_init(nvme_loop_init_module);
......
...@@ -124,14 +124,20 @@ enum { ...@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf) enum {
NVME_CMBSZ_SQS = 1 << 0,
#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10) NVME_CMBSZ_CQS = 1 << 1,
#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8) NVME_CMBSZ_LISTS = 1 << 2,
#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4) NVME_CMBSZ_RDS = 1 << 3,
#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) NVME_CMBSZ_WDS = 1 << 4,
#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
NVME_CMBSZ_SZ_SHIFT = 12,
NVME_CMBSZ_SZ_MASK = 0xfffff,
NVME_CMBSZ_SZU_SHIFT = 8,
NVME_CMBSZ_SZU_MASK = 0xf,
};
/* /*
* Submission and Completion Queue Entry Sizes for the NVM command set. * Submission and Completion Queue Entry Sizes for the NVM command set.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment