Commit 24ecfbe2 authored by Christoph Hellwig, committed by Jens Axboe

block: add blk_run_queue_async

Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly.  I've kept
the blk_queue_stopped check for now, but I suspect it's not needed
as the check we do when the workqueue item runs should be enough.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 4521cc4e
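
For orientation, a minimal before/after sketch of the calling convention this
patch changes (an illustration, not part of the commit; q stands for any
struct request_queue handled under queue_lock, as in the hunks below):

	/* Before: a bool argument selected the kblockd offload. */
	__blk_run_queue(q, false);	/* run the queue directly */
	__blk_run_queue(q, true);	/* punt the run to kblockd */

	/* After: the offload is an explicitly named helper. */
	__blk_run_queue(q);		/* run the queue directly */
	blk_run_queue_async(q);		/* punt the run to kblockd */
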
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
@@ -239,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
@@ -296,11 +296,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -309,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -317,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
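
A rough sketch of the control flow behind the new helper, pieced together from
the blk_delay_work() and __blk_run_queue() hunks above (a reading aid, not
part of the commit):

	/*
	 * blk_run_queue_async(q)
	 *   -> queue_delayed_work(kblockd_workqueue, &q->delay_work, 0)
	 *      ... kblockd later runs blk_delay_work() ...
	 *        -> spin_lock_irq(q->queue_lock)
	 *        -> __blk_run_queue(q)      /* re-checks blk_queue_stopped() */
	 *        -> spin_unlock_irq(q->queue_lock)
	 */
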
@@ -330,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2684,9 +2696,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	 */
 	if (from_schedule) {
 		spin_unlock(q->queue_lock);
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 	} else {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock(q->queue_lock);
 	}
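
The two branches of queue_unplugged() above also differ in lock ordering; the
annotated sketch below spells out why, based on the other hunks in this file
(the descriptive comments are editorial, not part of the commit):

	if (from_schedule) {
		/* Drop queue_lock first: the kblockd work item,
		 * blk_delay_work(), takes the lock itself before it
		 * calls __blk_run_queue() later. */
		spin_unlock(q->queue_lock);
		blk_run_queue_async(q);
	} else {
		/* __blk_run_queue() must be called with queue_lock held
		 * (see its kernel-doc above), so unlock only afterwards. */
		__blk_run_queue(q);
		spin_unlock(q->queue_lock);
	}
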
......
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
......
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver.  Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
......
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
......
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue, false);
+			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
......
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
......
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 					&sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue, false);
+		__blk_run_queue(sdev->request_queue);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
......
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q, false);
+	__blk_run_queue(rport->rqst_q);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
......
@@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
......