Commit 634f9e46 authored by Tejun Heo, committed by Jens Axboe

blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq

After the recent updates to use generation-number and state-based
synchronization, blk-mq no longer depends on REQ_ATOM_COMPLETE except
to avoid firing the same timeout multiple times.

Remove all REQ_ATOM_COMPLETE usages and use a new rq_flags flag
RQF_MQ_TIMEOUT_EXPIRED to avoid firing the same timeout multiple
times.  This removes atomic bitops from hot paths too.

v2: Removed blk_clear_rq_complete() from blk_mq_rq_timed_out().

v3: Added RQF_MQ_TIMEOUT_EXPIRED flag.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 358f70da
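
For orientation before the diff: the patch relies on the generation-number scheme introduced by the earlier timeout rework. Below is a minimal, self-contained userspace sketch (not kernel code) of that arbitration; struct req_sim, try_complete() and try_timeout() are made-up names, and the real code additionally depends on hctx_lock()/SRCU grace periods and a seqcount so the reads below are actually coherent.

/*
 * Userspace sketch of the gstate/aborted_gstate arbitration that lets
 * blk-mq drop REQ_ATOM_COMPLETE.  Illustrative names and structure only.
 */
#include <stdbool.h>
#include <stdio.h>

struct req_sim {
	unsigned long gstate;		/* bumped each time the request is (re)issued or completed */
	unsigned long aborted_gstate;	/* snapshot taken by the timeout path */
};

/* Timeout path: claim the current generation.  After a grace period,
 * the request is ours only if normal completion did not advance gstate
 * in the meantime. */
static bool try_timeout(struct req_sim *rq)
{
	rq->aborted_gstate = rq->gstate;
	/* ...grace period elided in this sketch... */
	return rq->aborted_gstate == rq->gstate;
}

/* Completion path: complete only if the timeout path has not claimed
 * this generation; no atomic test-and-set bit is needed. */
static bool try_complete(struct req_sim *rq)
{
	if (rq->aborted_gstate != rq->gstate) {
		rq->gstate++;		/* request moves on to a new generation */
		return true;
	}
	return false;
}

int main(void)
{
	struct req_sim rq = { .gstate = 1, .aborted_gstate = 0 };

	printf("normal completion: %d\n", try_complete(&rq));	/* 1: completes */
	printf("late timeout:      %d\n", try_timeout(&rq));	/* 0: yields, gstate moved on */
	return 0;
}

The point is that ownership of a request is decided by comparing gstate against aborted_gstate, so the test_and_set_bit() hidden in blk_mark_rq_complete() is no longer needed for mutual exclusion between the completion and timeout paths.
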
@@ -634,8 +634,7 @@ void blk_mq_complete_request(struct request *rq)
 	 * hctx_lock() covers both issue and completion paths.
 	 */
 	hctx_lock(hctx, &srcu_idx);
-	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
 		__blk_mq_complete_request(rq);
 	hctx_unlock(hctx, srcu_idx);
 }
@@ -685,8 +684,6 @@ void blk_mq_start_request(struct request *rq)
 	preempt_enable();
 
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
-		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -837,6 +834,8 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
 		return;
 
+	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
+
 	if (ops->timeout)
 		ret = ops->timeout(req, reserved);
@@ -852,7 +851,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		 */
 		blk_mq_rq_update_aborted_gstate(req, 0);
 		blk_add_timer(req);
-		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		break;
@@ -871,7 +869,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 
 	might_sleep();
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
+	    !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
 
 	/* read coherent snapshots of @rq->state_gen and @rq->deadline */
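
The trailing context comment above refers to reading the generation and deadline as a coherent pair. The kernel does this with a seqcount; the following userspace sketch (illustrative only, the snap_* names are made up and memory-ordering details are simplified) shows the same retry-until-stable read pattern.

/*
 * Seqcount-style reader/writer sketch: the writer makes the counter odd
 * while updating, the reader retries until it sees an even, unchanged
 * counter around its two loads.
 */
#include <stdatomic.h>
#include <stdio.h>

struct snap_state {
	atomic_uint seq;		/* odd while an update is in progress */
	unsigned long gstate;
	unsigned long deadline;
};

static void snap_write(struct snap_state *s, unsigned long g, unsigned long d)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* now odd */
	s->gstate = g;
	s->deadline = d;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* even again */
}

static void snap_read(struct snap_state *s, unsigned long *g, unsigned long *d)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*g = s->gstate;
		*d = s->deadline;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct snap_state s = { .seq = 0 };
	unsigned long g, d;

	snap_write(&s, 2, 5000);
	snap_read(&s, &g, &d);
	printf("gstate=%lu deadline=%lu\n", g, d);
	return 0;
}
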
@@ -906,8 +905,8 @@ static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
 	 * now guaranteed to see @rq->aborted_gstate and yield.  If
 	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
 	 */
-	if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
 		blk_mq_rq_timed_out(rq, reserved);
 }
 
......
@@ -214,6 +214,7 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	req->deadline = jiffies + req->timeout;
+	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
 	 * Only the non-mq case needs to add the request to a protected list.
......
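
Taken together with the blk-mq hunks above, this gives RQF_MQ_TIMEOUT_EXPIRED a simple lifecycle: set before the driver's ->timeout() runs, checked so later timeout scans skip the request, and cleared when the timer is re-armed (the BLK_EH_RESET_TIMER case, and every fresh issue). A compilable sketch of that lifecycle follows; it is illustrative only, struct req_sim and the helper names are made up, and only the flag name mirrors the patch.

/*
 * Lifecycle sketch of RQF_MQ_TIMEOUT_EXPIRED (not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

#define RQF_MQ_TIMEOUT_EXPIRED	(1u << 20)

struct req_sim {
	unsigned int rq_flags;
	unsigned long deadline;
};

static void timed_out(struct req_sim *rq)
{
	rq->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;	/* fire only once per arming */
	/* ...driver ->timeout() would run here... */
}

static bool should_scan(const struct req_sim *rq)
{
	/* blk_mq_check_expired() now skips already-expired requests */
	return !(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED);
}

static void add_timer(struct req_sim *rq, unsigned long now, unsigned long timeout)
{
	rq->deadline = now + timeout;
	rq->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;	/* re-armed: may expire again */
}

int main(void)
{
	struct req_sim rq = { 0 };

	timed_out(&rq);
	printf("scan after expiry: %d\n", should_scan(&rq));	/* 0 */
	add_timer(&rq, 1000, 30000);
	printf("scan after re-arm: %d\n", should_scan(&rq));	/* 1 */
	return 0;
}
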
@@ -125,6 +125,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
 /* The per-zone write lock is held for this request */
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED	((__force req_flags_t)(1 << 20))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
......
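
A note on why the new flag lives in rq->rq_flags rather than rq->atomic_flags: rq_flags is updated with plain bitwise operations, while atomic_flags bits need locked read-modify-write bitops such as test_and_set_bit(), which is the "atomic bitops from hot paths" the commit message mentions. The snippet below only contrasts the two kinds of operation; the argument that plain updates are safe here (the timeout machinery and blk_add_timer() cannot race harmfully thanks to the gstate scheme) is the patch's reasoning, not something this snippet demonstrates.

/*
 * Contrast: locked RMW bitop (modeled with a GCC/Clang __atomic builtin)
 * vs. a plain bitwise update on a flags word.  Illustrative only.
 */
#include <stdio.h>

#define RQF_MQ_TIMEOUT_EXPIRED	(1u << 20)

int main(void)
{
	unsigned int rq_flags = 0;
	unsigned long atomic_flags = 0;

	/* old style: atomic RMW, a locked instruction on x86 */
	__atomic_fetch_or(&atomic_flags, 1UL << 1, __ATOMIC_SEQ_CST);

	/* new style: plain bitwise op on rq_flags */
	rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;

	printf("atomic_flags=%#lx rq_flags=%#x\n", atomic_flags, rq_flags);
	return 0;
}
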