Commit 3f3299d5 authored by Bart Van Assche, committed by Jens Axboe

block: Rename queue dead flag

QUEUE_FLAG_DEAD is used to indicate that queuing new requests must
stop. After this flag has been set, queue draining starts. However,
during the queue draining phase it is still safe to invoke the
queue's request_fn, so QUEUE_FLAG_DYING is a better name for this
flag.
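
For illustration only, not part of this patch: the renamed helper is meant to
convey that a queue marked DYING is still being drained, so its request_fn may
still be invoked; the driver merely must not start new I/O. Below is a minimal
sketch of a hypothetical single-queue driver's request_fn (my_request_fn and
the hardware hand-off are placeholders; failing leftover requests with -ENODEV
mirrors what blk_flush_plug_list() does in this patch):

#include <linux/blkdev.h>

/* Hypothetical example: request_fn is called with q->queue_lock held and
 * may still run while QUEUE_FLAG_DYING is set, i.e. during draining. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (unlikely(blk_queue_dying(q))) {
			/* Queue is going away: complete what is left
			 * instead of starting new I/O. */
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}
		/* ... hand rq to the hardware ... */
	}
}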

This patch has been generated by running the following command
over the kernel source tree:

git grep -lEw 'blk_queue_dead|QUEUE_FLAG_DEAD' |
    xargs sed -i.tmp -e 's/blk_queue_dead/blk_queue_dying/g'      \
        -e 's/QUEUE_FLAG_DEAD/QUEUE_FLAG_DYING/g';                \
sed -i.tmp -e "s/QUEUE_FLAG_DYING$(printf \\t)*5/QUEUE_FLAG_DYING$(printf \\t)5/g" \
    include/linux/blkdev.h;                                       \
sed -i.tmp -e 's/ DEAD/ DYING/g' -e 's/dead queue/a dying queue/' \
    -e 's/Dead queue/A dying queue/' block/blk-core.c
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8fa72d23
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 * we shouldn't allow anything to go through for a bypassing queue.
 	 */
 	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
 	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
...
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
  * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	spinlock_t *lock = q->queue_lock;
 
-	/* mark @q DEAD, no new request or merges will be allowed afterwards */
+	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
 	spin_lock_irq(lock);
 
 	/*
-	 * Dead queue is permanently in bypass mode till released. Note
+	 * A dying queue is permanently in bypass mode till released. Note
 	 * that, unlike blk_queue_bypass_start(), we aren't performing
 	 * synchronize_rcu() after entering bypass mode to avoid the delay
 	 * as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	queue_flag_set(QUEUE_FLAG_DEAD, q);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DEAD marking */
+	/* drain all requests queued before DYING marking */
 	blk_drain_queue(q, true);
 
 	/* @q won't process any more request, flush async actions */
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!blk_queue_dead(q))) {
+	if (likely(!blk_queue_dying(q))) {
 		__blk_get_queue(q);
 		return true;
 	}
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_dying(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (rq)
 		return rq;
 
-	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return NULL;
 	}
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		return -ENODEV;
 	}
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
-	 * Don't mess with dead queue.
+	 * Don't mess with a dying queue.
 	 */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock(q->queue_lock);
 		return;
 	}
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * Short-circuit if @q is dead
 		 */
-		if (unlikely(blk_queue_dead(q))) {
+		if (unlikely(blk_queue_dying(q))) {
 			__blk_end_request_all(rq, -ENODEV);
 			continue;
 		}
...
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
...
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	q = container_of(kobj, struct request_queue, kobj);
 
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
...
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		/* if %NULL and @q is alive, fall back to root_tg */
 		if (!IS_ERR(blkg))
 			tg = blkg_to_tg(blkg);
-		else if (!blk_queue_dead(q))
+		else if (!blk_queue_dying(q))
 			tg = td_root_tg(td);
 	}
...
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (unlikely(blk_queue_dead(q)) ||
+		if (unlikely(blk_queue_dying(q)) ||
 		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
...
@@ -1406,7 +1406,7 @@ static int scsi_lld_busy(struct request_queue *q)
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
-	if (blk_queue_dead(q))
+	if (blk_queue_dying(q))
 		return 0;
 
 	shost = sdev->host;
...
@@ -437,7 +437,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
-#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
+#define QUEUE_FLAG_DYING	5	/* queue being torn down */
 #define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
@@ -521,7 +521,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
...