Commit 2fff8a92 authored by Bart Van Assche, committed by Jens Axboe

block: Check locking assumptions at runtime

Instead of documenting the locking assumptions of most block layer
functions as a comment, use lockdep_assert_held() to verify locking
assumptions at runtime.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9e0c8299
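
The conversion below is mechanical: wherever a comment said "queue lock must be held", the function body now carries lockdep_assert_held(q->queue_lock) (or req->q->queue_lock) instead. lockdep_assert_held() expands to a WARN_ON() when CONFIG_LOCKDEP is enabled and is a no-op otherwise, so the check costs nothing in production builds. A minimal sketch of the before/after pattern, using a hypothetical helper that is not part of this commit:

    #include <linux/blkdev.h>
    #include <linux/lockdep.h>

    /*
     * Before this commit the rule lived only in a comment:
     *     "Queue lock must be held."
     * After it, the rule is checked at runtime (hypothetical example):
     */
    static void example_kick_queue(struct request_queue *q)
    {
            /* Warns via lockdep if the caller does not hold q->queue_lock. */
            lockdep_assert_held(q->queue_lock);

            if (!blk_queue_stopped(q))
                    blk_run_queue_async(q);
    }
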
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -236,10 +236,12 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (likely(!blk_queue_dead(q)))
                 queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                    msecs_to_jiffies(msecs));
@@ -257,6 +259,8 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
         blk_run_queue_async(q);
 }
@@ -269,10 +273,11 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
         WARN_ON(!irqs_disabled());
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
@@ -292,10 +297,12 @@ EXPORT_SYMBOL(blk_start_queue);
  *   or if it simply chooses not to queue more I/O at one point, it can
  *   call this function to prevent the request_fn from being called until
  *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ *   blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -348,6 +355,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_dead(q)))
                 return;
@@ -369,11 +378,12 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q: The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_stopped(q)))
                 return;
@@ -387,10 +397,17 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+        lockdep_assert_held(q->queue_lock);
+
         if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
@@ -1136,6 +1153,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
         int may_queue;
         req_flags_t rq_flags = RQF_ALLOCED;
 
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely(blk_queue_dying(q)))
                 return ERR_PTR(-ENODEV);
@@ -1309,6 +1328,8 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
         struct request_list *rl;
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
         rq = __get_request(rl, op, bio, gfp_mask);
@@ -1402,6 +1423,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+        lockdep_assert_held(q->queue_lock);
+
         blk_delete_timer(rq);
         blk_clear_rq_complete(rq);
         trace_block_rq_requeue(q, rq);
@@ -1476,9 +1499,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
         req_flags_t rq_flags = req->rq_flags;
@@ -1491,6 +1511,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 return;
         }
 
+        lockdep_assert_held(q->queue_lock);
+
         blk_pm_put_request(req);
         elv_completed_request(q, req);
@@ -2327,9 +2349,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2469,15 +2488,14 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
         struct request *rq;
         int ret;
 
+        lockdep_assert_held(q->queue_lock);
+
         while ((rq = __elv_next_request(q)) != NULL) {
                 rq = blk_pm_peek_request(q, rq);
@@ -2593,12 +2611,11 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+        lockdep_assert_held(req->q->queue_lock);
+
         blk_dequeue_request(req);
 
         if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2623,14 +2640,13 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
         struct request *rq;
 
+        lockdep_assert_held(q->queue_lock);
+
         rq = blk_peek_request(q);
         if (rq)
                 blk_start_request(rq);
@@ -2776,13 +2792,12 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
 void blk_finish_request(struct request *req, blk_status_t error)
 {
         struct request_queue *q = req->q;
 
+        lockdep_assert_held(req->q->queue_lock);
+
         if (req->rq_flags & RQF_STATS)
                 blk_stat_add(req);
@@ -2864,6 +2879,8 @@ static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+
         if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                 return true;
@@ -2930,6 +2947,8 @@ EXPORT_SYMBOL(blk_end_request_all);
 bool __blk_end_request(struct request *rq, blk_status_t error,
                        unsigned int nr_bytes)
 {
+        lockdep_assert_held(rq->q->queue_lock);
+
         return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
@@ -2947,6 +2966,8 @@ void __blk_end_request_all(struct request *rq, blk_status_t error)
         bool pending;
         unsigned int bidi_bytes = 0;
 
+        lockdep_assert_held(rq->q->queue_lock);
+
         if (unlikely(blk_bidi_rq(rq)))
                 bidi_bytes = blk_rq_bytes(rq->next_rq);
@@ -3211,6 +3232,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                             bool from_schedule)
         __releases(q->queue_lock)
 {
+        lockdep_assert_held(q->queue_lock);
+
         trace_block_unplug(q, depth, !from_schedule);
 
         if (from_schedule)
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -346,6 +346,8 @@ static void flush_data_end_io(struct request *rq, blk_status_t error)
         struct request_queue *q = rq->q;
         struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+        lockdep_assert_held(q->queue_lock);
+
         /*
          * Updating q->in_flight[] here for making this tag usable
          * early. Because in blk_queue_start_tag(),
@@ -411,9 +413,6 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -422,6 +421,9 @@ void blk_insert_flush(struct request *rq)
         unsigned int policy = blk_flush_policy(fflags, rq);
         struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         /*
          * @policy now records what operations need to be done.  Adjust
          * REQ_PREFLUSH and FUA for the driver.
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -648,6 +648,9 @@ static void blk_account_io_merge(struct request *req)
 static struct request *attempt_merge(struct request_queue *q,
                                      struct request *req, struct request *next)
 {
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         if (!rq_mergeable(req) || !rq_mergeable(next))
                 return NULL;
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -258,15 +258,14 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
         struct blk_queue_tag *bqt = q->queue_tags;
         unsigned tag = rq->tag; /* negative tags invalid */
 
+        lockdep_assert_held(q->queue_lock);
+
         BUG_ON(tag >= bqt->real_max_depth);
 
         list_del_init(&rq->queuelist);
@@ -307,9 +306,6 @@ EXPORT_SYMBOL(blk_queue_end_tag);
  *    calling this function.  The request will also be removed from
  *    the request queue, so it's the drivers responsibility to readd
  *    it if it should need to be restarted for some reason.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
@@ -317,6 +313,8 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
         unsigned max_depth;
         int tag;
 
+        lockdep_assert_held(q->queue_lock);
+
         if (unlikely((rq->rq_flags & RQF_QUEUED))) {
                 printk(KERN_ERR
                        "%s: request %p for device [%s] already tagged %d",
@@ -389,14 +387,13 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  *   Hardware conditions may dictate a need to stop all pending requests.
  *   In this case, we will safely clear the block side of the tag queue and
  *   readd all requests to the request queue in the right order.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
         struct list_head *tmp, *n;
 
+        lockdep_assert_held(q->queue_lock);
+
         list_for_each_safe(tmp, n, &q->tag_busy_list)
                 blk_requeue_request(q, list_entry_rq(tmp));
 }
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -189,13 +189,15 @@ unsigned long blk_rq_timeout(unsigned long timeout)
  * Notes:
  *    Each request has its own timer, and as it is added to the queue, we
  *    set up the timer. When the request completes, we cancel the timer.
- *    Queue lock must be held for the non-mq case, mq case doesn't care.
  */
 void blk_add_timer(struct request *req)
 {
         struct request_queue *q = req->q;
         unsigned long expiry;
 
+        if (!q->mq_ops)
+                lockdep_assert_held(q->queue_lock);
+
         /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
         if (!q->mq_ops && !q->rq_timed_out_fn)
                 return;
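
Three of the converted functions are shared between the legacy request path and blk-mq — blk_insert_flush(), attempt_merge() and blk_add_timer() — so their assertions are guarded by !q->mq_ops, since blk-mq does not dispatch under q->queue_lock. For legacy-path callers, the practical effect of the patch is that a missing lock now shows up as a lockdep warning instead of a silent race. A hypothetical driver-side caller of one of the converted functions (illustrative sketch, not from the commit):

    #include <linux/blkdev.h>

    /* Hypothetical driver helper, for illustration only. */
    static void example_requeue(struct request_queue *q, struct request *rq)
    {
            unsigned long flags;

            /*
             * blk_requeue_request() now asserts that q->queue_lock is held,
             * so an unlocked call triggers a lockdep warning (with
             * CONFIG_LOCKDEP=y).  The correct pattern takes the queue lock:
             */
            spin_lock_irqsave(q->queue_lock, flags);
            blk_requeue_request(q, rq);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }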