Commit 0136d86b authored by Linus Torvalds

Merge tag 'block-6.2-2023-02-03' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A bit bigger than I'd like at this point, but mostly a bunch of little
  fixes. In detail:

   - NVMe pull request via Christoph:
       - Fix a missing queue put in nvmet_fc_ls_create_association
         (Amit Engel)
       - Clear queue pointers on tag_set initialization failure
         (Maurizio Lombardi)
       - Use workqueue dedicated to authentication (Shin'ichiro
         Kawasaki)

   - Fix for an overflow in ublk (Liu)

   - Fix for leaking a queue reference in block cgroups (Ming)

   - Fix for a use-after-free in BFQ (Yu)"

* tag 'block-6.2-2023-02-03' of git://git.kernel.dk/linux:
  blk-cgroup: don't update io stat for root cgroup
  nvme-auth: use workqueue dedicated to authentication
  nvme: clear the request_queue pointers on failure in nvme_alloc_io_tag_set
  nvme: clear the request_queue pointers on failure in nvme_alloc_admin_tag_set
  nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
  block: Fix the blk_mq_destroy_queue() documentation
  block: ublk: extending queue_size to fix overflow
  block, bfq: fix uaf for bfqq in bic_set_bfqq()
parents 7b753a90 e02bbac7
@@ -769,8 +769,8 @@ static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
 			 * request from the old cgroup.
 			 */
 			bfq_put_cooperator(sync_bfqq);
-			bfq_release_process_ref(bfqd, sync_bfqq);
 			bic_set_bfqq(bic, NULL, true);
+			bfq_release_process_ref(bfqd, sync_bfqq);
 		}
 	}
 }
......
@@ -5425,9 +5425,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		bfq_release_process_ref(bfqd, bfqq);
+		struct bfq_queue *old_bfqq = bfqq;
+
 		bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
 		bic_set_bfqq(bic, bfqq, false);
+		bfq_release_process_ref(bfqd, old_bfqq);
 	}

 	bfqq = bic_to_bfqq(bic, true);
......
@@ -2001,6 +2001,10 @@ void blk_cgroup_bio_start(struct bio *bio)
 	struct blkg_iostat_set *bis;
 	unsigned long flags;

+	/* Root-level stats are sourced from system-wide IO stats */
+	if (!cgroup_parent(blkcg->css.cgroup))
+		return;
+
 	cpu = get_cpu();
 	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
 	flags = u64_stats_update_begin_irqsave(&bis->sync);
......
@@ -4069,8 +4069,9 @@ EXPORT_SYMBOL(blk_mq_init_queue);
  * blk_mq_destroy_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * This shuts down a request queue allocated by blk_mq_init_queue() and drops
- * the initial reference. All future requests will failed with -ENODEV.
+ * This shuts down a request queue allocated by blk_mq_init_queue(). All future
+ * requests will be failed with -ENODEV. The caller is responsible for dropping
+ * the reference from blk_mq_init_queue() by calling blk_put_queue().
  *
  * Context: can sleep
  */
......
@@ -137,7 +137,7 @@ struct ublk_device {
 	char	*__queues;

-	unsigned short		queue_size;
+	unsigned int		queue_size;
 	struct ublksrv_ctrl_dev_info	dev_info;

 	struct blk_mq_tag_set	tag_set;
......
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
 	int sess_key_len;
 };

+struct workqueue_struct *nvme_auth_wq;
+
 #define nvme_auth_flags_from_qid(qid) \
 	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
 #define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 	chap = &ctrl->dhchap_ctxs[qid];
 	cancel_work_sync(&chap->auth_work);
-	queue_work(nvme_wq, &chap->auth_work);
+	queue_work(nvme_auth_wq, &chap->auth_work);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
 int __init nvme_init_auth(void)
 {
+	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_auth_wq)
+		return -ENOMEM;
+
 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!nvme_chap_buf_cache)
-		return -ENOMEM;
+		goto err_destroy_workqueue;

 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
 			mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
 	return 0;
 err_destroy_chap_buf_cache:
 	kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+	destroy_workqueue(nvme_auth_wq);
 	return -ENOMEM;
 }
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
 {
 	mempool_destroy(nvme_chap_buf_pool);
 	kmem_cache_destroy(nvme_chap_buf_cache);
+	destroy_workqueue(nvme_auth_wq);
 }
@@ -4921,7 +4921,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	blk_mq_destroy_queue(ctrl->admin_q);
 	blk_put_queue(ctrl->admin_q);
 out_free_tagset:
-	blk_mq_free_tag_set(ctrl->admin_tagset);
+	blk_mq_free_tag_set(set);
+	ctrl->admin_q = NULL;
+	ctrl->fabrics_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4983,6 +4985,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 out_free_tag_set:
 	blk_mq_free_tag_set(set);
+	ctrl->connect_q = NULL;
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
......
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	else {
 		queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
 				be16_to_cpu(rqst->assoc_cmd.sqsize));
-		if (!queue)
+		if (!queue) {
 			ret = VERR_QUEUE_ALLOC_FAIL;
+			nvmet_fc_tgt_a_put(iod->assoc);
+		}
 	}
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment