Commit c7e2d94b authored by Ming Lei, committed by Jens Axboe

blk-mq: free hw queue's resource in hctx's release handler

Once blk_cleanup_queue() returns, tags must not be used any more,
because blk_mq_free_tag_set() may be called. Commit 45a9c9d9
("blk-mq: Fix a use-after-free") fixes exactly this issue.

However, that commit introduces another issue. Before 45a9c9d9, the
queue could be run during queue cleanup as long as the queue's kobj
refcount was held. After that commit, the queue can no longer be run
during cleanup, otherwise an oops is easily triggered because some
fields of the hctx are freed by blk_mq_free_queue() in blk_cleanup_queue().

We have worked around this kind of issue before, for example in:

	8dc765d4 ("SCSI: fix queue cleanup race before queue initialization is done")
	c2856ae2 ("blk-mq: quiesce queue before freeing queue")

But those workarounds still don't cover all cases; recently James
reported another issue of this kind:

	https://marc.info/?l=linux-scsi&m=155389088124782&w=2

This issue is quite hard to address with the previous approach, given
that scsi_run_queue() may run requeues for other LUNs.

Fix the above issue by freeing the hctx's resources in its release
handler. This is safe because the tags aren't needed for freeing those
hctx resources.

This approach follows the typical design pattern for a kobject's release handler.
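
As a rough illustration of that pattern (a minimal sketch with hypothetical
names, not the actual blk-mq code): the structure that embeds the kobject,
together with the resources it owns, is freed only from its ->release()
callback, so any code path still holding a reference can never touch freed
memory.

	#include <linux/kobject.h>
	#include <linux/slab.h>

	/* Hypothetical object owning a resource; illustrative only. */
	struct my_hw_ctx {
		struct kobject kobj;
		void *private_data;	/* resource owned by this object */
	};

	static void my_hw_ctx_release(struct kobject *kobj)
	{
		struct my_hw_ctx *ctx = container_of(kobj, struct my_hw_ctx, kobj);

		/* Runs only after the last kobject_put(), so freeing is safe here. */
		kfree(ctx->private_data);
		kfree(ctx);
	}

	static struct kobj_type my_hw_ctx_ktype = {
		.release = my_hw_ctx_release,
	};

A teardown path then only drops its reference with kobject_put(); the release
callback runs once the last reference is gone, which is how
blk_mq_hw_sysfs_release() is used in the diff below.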

Cc: Dongli Zhang <dongli.zhang@oracle.com>
Cc: James Smart <james.smart@broadcom.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: linux-scsi@vger.kernel.org
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James E.J. Bottomley <jejb@linux.vnet.ibm.com>
Reported-by: James Smart <james.smart@broadcom.com>
Fixes: 45a9c9d9 ("blk-mq: Fix a use-after-free")
Cc: stable@vger.kernel.org
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent fbc2a15e
@@ -375,7 +375,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_exit_queue(q);
 	if (queue_is_mq(q))
-		blk_mq_free_queue(q);
+		blk_mq_exit_queue(q);
 	percpu_ref_exit(&q->q_usage_counter);
...
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/blk-mq.h>
+#include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
@@ -34,6 +35,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 {
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
+	if (hctx->flags & BLK_MQ_F_BLOCKING)
+		cleanup_srcu_struct(hctx->srcu);
+	blk_free_flush_queue(hctx->fq);
+	sbitmap_free(&hctx->ctx_map);
 	free_cpumask_var(hctx->cpumask);
 	kfree(hctx->ctxs);
 	kfree(hctx);
...
@@ -2268,12 +2268,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
-	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		cleanup_srcu_struct(hctx->srcu);
 	blk_mq_remove_cpuhp(hctx);
-	blk_free_flush_queue(hctx->fq);
-	sbitmap_free(&hctx->ctx_map);
 }
 static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2908,7 +2903,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
...
@@ -37,7 +37,7 @@ struct blk_mq_ctx {
 	struct kobject kobj;
 } ____cacheline_aligned_in_smp;
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
...