Commit 002c0aef authored by Linus Torvalds

Merge tag 'block-5.14-2021-08-20' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes from Ming Lei that should go into 5.14:

   - Fix for a kernel panic when iterating over tags for some cases
     where a flush request is present, a regression in this cycle.

   - Request timeout fix

   - Fix flush request checking"

* tag 'block-5.14-2021-08-20' of git://git.kernel.dk/linux-block:
  blk-mq: fix is_flush_rq
  blk-mq: fix kernel panic during iterating over flush request
  blk-mq: don't grab rq's refcount in blk_mq_check_expired()
parents 1e6907d5 a9ed27a7
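
For context on the diff below: the blk-mq tag iterators only dereference a request after pinning it with refcount_inc_not_zero(). The helper named in the barrier comment further down, blk_mq_find_and_get_req() in block/blk-mq-tag.c, does roughly the following (a simplified sketch; the upstream body may differ in detail):

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        /* tags->lock keeps the rqs[] slot stable while it is inspected */
        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        /* only hand out the request if its reference count is still live */
        if (!rq || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}

The flush fixes below guarantee that when this succeeds for the flush request, its ->end_io has already been published, so the is_flush_rq()/->end_io path in blk_mq_put_rq_ref() only ever sees a fully initialized flush request.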

block/blk-core.c
@@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
         rq->internal_tag = BLK_MQ_NO_TAG;
         rq->start_time_ns = ktime_get_ns();
         rq->part = NULL;
-        refcount_set(&rq->ref, 1);
         blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);

block/blk-flush.c
@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
         spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+        return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
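
The new helper identifies a flush request from the request alone. The previous inline version (removed in the blk.h hunk at the bottom of this diff) needed a valid hctx, which callers such as blk_mq_put_rq_ref() had to supply as rq->mq_hctx. A sketch of the difference (the *_old name is for illustration only):

/* old: only valid where rq->mq_hctx (and hctx->fq) can be trusted */
static inline bool is_flush_rq_old(struct request *req, struct blk_mq_hw_ctx *hctx)
{
        return hctx->fq->flush_rq == req;
}

/* new: looks at the request itself, so it works for any pinned request */
bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}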

@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         flush_rq->rq_flags |= RQF_FLUSH_SEQ;
         flush_rq->rq_disk = first_rq->rq_disk;
         flush_rq->end_io = flush_end_io;
+        /*
+         * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
+         * implied in refcount_inc_not_zero() called from
+         * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
+         * and READ flush_rq->end_io
+         */
+        smp_wmb();
+        refcount_set(&flush_rq->ref, 1);
 
         blk_flush_queue_rq(flush_rq, false);
 }
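
The barrier comment above describes a publish pattern: make ->end_io valid first, then make the reference live, so that any reader that manages to take the reference is also guaranteed to see the handler. A condensed sketch of the two sides (writer as in blk_kick_flush() above, reader approximating the tag-iterator path; illustrative, not literal upstream code):

/* writer: blk_kick_flush() republishing the reusable flush request */
flush_rq->end_io = flush_end_io;        /* 1: handler becomes valid      */
smp_wmb();                              /* 2: order store 1 vs. store 3  */
refcount_set(&flush_rq->ref, 1);        /* 3: only now the ref is live   */

/* reader: e.g. a tag iterator via blk_mq_find_and_get_req() */
if (refcount_inc_not_zero(&rq->ref)) {  /* observes store 3 ...          */
        /*
         * ... and, per the comment above, the ordering implied by
         * refcount_inc_not_zero() pairs with the smp_wmb(), so store 1
         * is visible as well and ->end_io is never seen stale.
         */
        if (is_flush_rq(rq))
                rq->end_io(rq, 0);      /* the blk_mq_put_rq_ref() path  */
}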

block/blk-mq.c
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-        if (is_flush_rq(rq, rq->mq_hctx))
+        if (is_flush_rq(rq))
                 rq->end_io(rq, 0);
         else if (refcount_dec_and_test(&rq->ref))
                 __blk_mq_free_request(rq);
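
Dropping the last reference on a flush request is routed through its ->end_io handler instead of __blk_mq_free_request(), because the flush request is the preallocated, reusable fq->flush_rq rather than a normally allocated request; flush_end_io() performs the matching reference drop itself. Roughly, and hedged since this part is not shown in the diff, the handler starts like this:

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        ...
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        /* someone (e.g. a tag iterator) still holds a reference: bail out */
        if (!refcount_dec_and_test(&flush_rq->ref)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return;
        }
        ...
}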

@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         unsigned long *next = priv;
 
         /*
-         * Just do a quick check if it is expired before locking the request in
-         * so we're not unnecessarilly synchronizing across CPUs.
-         */
-        if (!blk_mq_req_expired(rq, next))
-                return true;
-
-        /*
-         * We have reason to believe the request may be expired. Take a
-         * reference on the request to lock this request lifetime into its
-         * currently allocated context to prevent it from being reallocated in
-         * the event the completion by-passes this timeout handler.
-         *
-         * If the reference was already released, then the driver beat the
-         * timeout handler to posting a natural completion.
-         */
-        if (!refcount_inc_not_zero(&rq->ref))
-                return true;
-
-        /*
-         * The request is now locked and cannot be reallocated underneath the
-         * timeout handler's processing. Re-verify this exact request is truly
-         * expired; if it is not expired, then the request was completed and
-         * reallocated as a new request.
+         * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+         * be reallocated underneath the timeout handler's processing, then
+         * the expire check is reliable. If the request is not expired, then
+         * it was completed and reallocated as a new request after returning
+         * from blk_mq_check_expired().
          */
         if (blk_mq_req_expired(rq, next))
                 blk_mq_rq_timed_out(rq, reserved);
-
-        blk_mq_put_rq_ref(rq);
         return true;
 }
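
The rewritten comment relies on the caller holding the reference across the whole callback. That caller is the per-tag worker used by blk_mq_queue_tag_busy_iter(); a simplified sketch (approximating bt_iter() in block/blk-mq-tag.c, details hedged) shows why blk_mq_check_expired() no longer needs its own refcount_inc_not_zero()/blk_mq_put_rq_ref() pair:

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct request *rq;
        bool ret = true;

        /* pin the request before running the callback ... */
        rq = blk_mq_find_and_get_req(hctx->tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == hctx->queue && rq->mq_hctx == hctx)
                ret = iter_data->fn(hctx, rq, iter_data->data,
                                    iter_data->reserved);
        /* ... and drop it only after blk_mq_check_expired() has returned */
        blk_mq_put_rq_ref(rq);
        return ret;
}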

block/blk.h
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
         kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-        return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                               gfp_t flags);