Commit 055f6e18 authored by Ming Lei, committed by Jens Axboe

block: Make q_usage_counter also track legacy requests

This patch makes it possible to pause request allocation for
the legacy block layer by calling blk_mq_freeze_queue() and
blk_mq_unfreeze_queue().
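
With this in place a caller can quiesce a legacy (non-mq) queue the same way as a
blk-mq queue. A minimal sketch of the intended usage (the wrapper function and the
reconfiguration step are illustrative, not part of this patch):

	/*
	 * Illustrative only: freeze the queue so no new requests can be
	 * allocated, apply a change that must not race with in-flight
	 * requests, then unfreeze. Because q_usage_counter now also counts
	 * legacy requests, the freeze waits for those to drain as well.
	 */
	static void example_quiesce_and_update(struct request_queue *q)
	{
		blk_mq_freeze_queue(q);		/* blocks new blk_queue_enter() callers */
		/* ... update queue state while no requests are in flight ... */
		blk_mq_unfreeze_queue(q);	/* resume request allocation */
	}
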
Signed-off-by: Ming Lei <ming.lei@redhat.com>
[ bvanassche: Combined two patches into one, edited a comment and made sure
  REQ_NOWAIT is handled properly in blk_old_get_request() ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent eb619fdb
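
The hunks below pair each legacy request with a reference on q_usage_counter:
blk_queue_enter() takes the reference when the request is allocated, and the
matching blk_queue_exit() drops it when the request is freed or on the error
paths. Reduced to its essentials (sketch condensed from the diff; locking and
error handling trimmed):

	/* allocation: take a q_usage_counter reference before get_request() */
	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
			      (op & REQ_NOWAIT));
	if (ret)
		return ERR_PTR(ret);
	rq = get_request(q, op, NULL, gfp_mask);

	/* free: drop the reference once the request goes back to the pool */
	blk_free_request(rl, req);
	blk_queue_exit(q);
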
@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
 		}
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	/* Make blk_queue_enter() reexamine the DYING flag. */
+	wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
 					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
+	int ret = 0;
 
 	WARN_ON_ONCE(q->mq_ops);
 
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
+	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+			      (op & REQ_NOWAIT));
+	if (ret)
+		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
+		blk_queue_exit(q);
 		return rq;
 	}
@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		blk_free_request(rl, req);
 		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
+		blk_queue_exit(q);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1860,8 +1870,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
+	blk_queue_enter_live(q);
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
+		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);
 		if (PTR_ERR(req) == -ENOMEM)
 			bio->bi_status = BLK_STS_RESOURCE;
...
@@ -126,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		blk_mq_run_hw_queues(q, false);
+		if (q->mq_ops)
+			blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		if (blk_mq_hw_queue_mapped(hctx))
 			blk_mq_tag_wakeup_all(hctx->tags, true);
-
-	/*
-	 * If we are called because the queue has now been marked as
-	 * dying, we need to ensure that processes currently waiting on
-	 * the queue are notified as well.
-	 */
-	wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
...