Commit e02bbac7 authored by Jens Axboe

Merge tag 'nvme-6.2-2023-02-02' of git://git.infradead.org/nvme into block-6.2

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.2

 - fix a missing queue put in nvmet_fc_ls_create_association (Amit Engel)
 - clear queue pointers on tag_set initialization failure
   (Maurizio Lombardi)
 - use workqueue dedicated to authentication (Shin'ichiro Kawasaki)"

* tag 'nvme-6.2-2023-02-02' of git://git.infradead.org/nvme:
  nvme-auth: use workqueue dedicated to authentication
  nvme: clear the request_queue pointers on failure in nvme_alloc_io_tag_set
  nvme: clear the request_queue pointers on failure in nvme_alloc_admin_tag_set
  nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
parents 0416f3be bd97a59d
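
For context, the authentication fix in this pull moves the DH-HMAC-CHAP work off the shared nvme_wq and onto a workqueue dedicated to authentication, allocated at init time and torn down on exit. The hunks below show the actual change; what follows is only a minimal, self-contained sketch of that general dedicated-workqueue pattern, using hypothetical example_* names rather than the real driver symbols:

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical example module: give long-running work its own workqueue
 * (alloc_workqueue/queue_work/destroy_workqueue), mirroring the pattern
 * applied to the nvme-auth code in the diff below. */
static struct workqueue_struct *example_auth_wq;

static void example_auth_work_fn(struct work_struct *work)
{
	/* a long-running handshake or similar would run here */
}

static DECLARE_WORK(example_auth_work, example_auth_work_fn);

static int __init example_init(void)
{
	example_auth_wq = alloc_workqueue("example-auth-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!example_auth_wq)
		return -ENOMEM;

	/* queue work on the dedicated workqueue, not the system one */
	queue_work(example_auth_wq, &example_auth_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* destroy_workqueue() drains pending work before freeing the queue,
	 * so no separate flush is needed on the exit path */
	destroy_workqueue(example_auth_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
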
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
 	int sess_key_len;
 };
 
+struct workqueue_struct *nvme_auth_wq;
+
 #define nvme_auth_flags_from_qid(qid) \
 	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
 #define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 	chap = &ctrl->dhchap_ctxs[qid];
 	cancel_work_sync(&chap->auth_work);
-	queue_work(nvme_wq, &chap->auth_work);
+	queue_work(nvme_auth_wq, &chap->auth_work);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
 int __init nvme_init_auth(void)
 {
+	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_auth_wq)
+		return -ENOMEM;
+
 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!nvme_chap_buf_cache)
-		return -ENOMEM;
+		goto err_destroy_workqueue;
 
 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
 			mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
 	return 0;
 err_destroy_chap_buf_cache:
 	kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+	destroy_workqueue(nvme_auth_wq);
 	return -ENOMEM;
 }
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
 {
 	mempool_destroy(nvme_chap_buf_pool);
 	kmem_cache_destroy(nvme_chap_buf_cache);
+	destroy_workqueue(nvme_auth_wq);
 }
@@ -4892,7 +4892,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	blk_mq_destroy_queue(ctrl->admin_q);
 	blk_put_queue(ctrl->admin_q);
 out_free_tagset:
-	blk_mq_free_tag_set(ctrl->admin_tagset);
+	blk_mq_free_tag_set(set);
+	ctrl->admin_q = NULL;
+	ctrl->fabrics_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4954,6 +4956,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 out_free_tag_set:
 	blk_mq_free_tag_set(set);
+	ctrl->connect_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 		else {
 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
 					be16_to_cpu(rqst->assoc_cmd.sqsize));
-			if (!queue)
+			if (!queue) {
 				ret = VERR_QUEUE_ALLOC_FAIL;
+				nvmet_fc_tgt_a_put(iod->assoc);
+			}
 		}
 	}