Commit 7f556a44 authored by Jianchao Wang, committed by Jens Axboe

blk-mq: refactor the code of issue request directly

Merge blk_mq_try_issue_directly and __blk_mq_try_issue_directly
into one interface to unify the paths that issue requests
directly. The merged interface takes over the request completely:
based on the return value of .queue_rq and the 'bypass' parameter,
it either inserts the request, ends it, or does nothing. Callers
therefore need no further handling, and the code can be cleaned up.
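
As a rough illustration, the two calling conventions after the merge
(condensed from the diff below; the trailing arguments are 'bypass'
and 'last'):

	/* bio submission path: bypass == false, the callee disposes of the request */
	blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);

	/* request-based direct issue: bypass == true, the caller handles the status */
	blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
	{
		blk_qc_t unused;

		return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
	}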

In addition, commit c616cbee ("blk-mq: punt failed direct issue
to dispatch list") always inserts requests into the hctx dispatch
list whenever BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE is returned.
This is overkill and harms request merging; it only needs to be done
for requests that have actually been through .queue_rq. This patch
fixes that as well.
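
The core of the new outcome handling, condensed from the diff below
('force' is set only after the request has been handed to .queue_rq;
the bypass == true fixup of 'ret' for DM is omitted here):

	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_DEV_RESOURCE:
	case BLK_STS_RESOURCE:
		if (force)		/* seen by .queue_rq(): punt to the hctx dispatch list */
			blk_mq_request_bypass_insert(rq, run_queue);
		else if (!bypass)	/* never reached the driver: stay mergeable via the scheduler */
			blk_mq_sched_insert_request(rq, false, run_queue, false);
		break;
	default:
		if (!bypass)
			blk_mq_end_request(rq, ret);
		break;
	}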
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4c9770c9
@@ -1792,78 +1792,83 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie,
-						bool bypass_insert, bool last)
+						bool bypass, bool last)
 {
 	struct request_queue *q = rq->q;
 	bool run_queue = true;
+	blk_status_t ret = BLK_STS_RESOURCE;
+	int srcu_idx;
+	bool force = false;
 
+	hctx_lock(hctx, &srcu_idx);
 	/*
-	 * RCU or SRCU read lock is needed before checking quiesced flag.
+	 * hctx_lock is needed before checking quiesced flag.
 	 *
-	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
-	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
-	 * and avoid driver to try to dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass', insert
+	 * and return BLK_STS_OK to caller, and avoid driver to try to
+	 * dispatch again.
 	 */
-	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
 		run_queue = false;
-		bypass_insert = false;
-		goto insert;
+		bypass = false;
+		goto out_unlock;
 	}
 
-	if (q->elevator && !bypass_insert)
-		goto insert;
+	if (unlikely(q->elevator && !bypass))
+		goto out_unlock;
 
 	if (!blk_mq_get_dispatch_budget(hctx))
-		goto insert;
+		goto out_unlock;
 
 	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
-		goto insert;
+		goto out_unlock;
 	}
 
-	return __blk_mq_issue_directly(hctx, rq, cookie, last);
-insert:
-	if (bypass_insert)
-		return BLK_STS_RESOURCE;
-
-	blk_mq_request_bypass_insert(rq, run_queue);
-	return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-		struct request *rq, blk_qc_t *cookie)
-{
-	blk_status_t ret;
-	int srcu_idx;
-
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-	hctx_lock(hctx, &srcu_idx);
-
-	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
-	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
-	else if (ret != BLK_STS_OK)
-		blk_mq_end_request(rq, ret);
-
+	/*
+	 * Always add a request that has been through
+	 * .queue_rq() to the hardware dispatch list.
+	 */
+	force = true;
+	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
 	hctx_unlock(hctx, srcu_idx);
+	switch (ret) {
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_DEV_RESOURCE:
+	case BLK_STS_RESOURCE:
+		if (force) {
+			blk_mq_request_bypass_insert(rq, run_queue);
+			/*
+			 * We have to return BLK_STS_OK for the DM
+			 * to avoid livelock. Otherwise, we return
+			 * the real result to indicate whether the
+			 * request is direct-issued successfully.
+			 */
+			ret = bypass ? BLK_STS_OK : ret;
+		} else if (!bypass) {
+			blk_mq_sched_insert_request(rq, false,
+						    run_queue, false);
+		}
+		break;
+	default:
+		if (!bypass)
+			blk_mq_end_request(rq, ret);
+		break;
+	}
+
+	return ret;
 }
 
 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-	blk_status_t ret;
-	int srcu_idx;
-	blk_qc_t unused_cookie;
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	blk_qc_t unused;
 
-	hctx_lock(hctx, &srcu_idx);
-	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
-	hctx_unlock(hctx, srcu_idx);
-
-	return ret;
+	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2004,13 +2009,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie);
+					&cookie, false, true);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
 	} else {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);