Commit 50864670 authored by Thomas Gleixner, committed by Jens Axboe

block: Shorten interrupt disabled regions

Commit 9c40cef2 ("sched: Move blk_schedule_flush_plug() out of
__schedule()") moved the blk_schedule_flush_plug() call out of the
interrupt/preempt disabled region in the scheduler. This allows replacing
local_irq_save/restore(flags) with local_irq_disable/enable() in
blk_flush_plug_list().

But it makes more sense to disable interrupts explicitly when the request
queue is locked and to reenable them when the request queue is unlocked. This
shortens the interrupt disabled section, which is important when the plug
list contains requests for more than one queue. The comment claiming that
interrupts are disabled around the loop to avoid doing it for every queue
lock is misleading, as the called functions can reenable interrupts
unconditionally anyway, and the detached disabling obfuscates the scope badly:

 local_irq_save(flags);
   spin_lock(q->queue_lock);
   ...
   queue_unplugged(q...);
     scsi_request_fn();
       spin_unlock_irq(q->queue_lock);

-------------------^^^ ????

       spin_lock_irq(q->queue_lock);
     spin_unlock(q->queue_lock);
 local_irq_restore(flags);

Aside of that the detached interrupt disabling is a constant pain for
PREEMPT_RT as it requires patching and special casing when RT is enabled
while with the spin_*_irq() variants this happens automatically.
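
For illustration, a minimal sketch of the locking pattern before and after
this change (simplified, not the actual blk_flush_plug_list(); the
hypothetical dispatch_queue() stands in for queue_unplugged(), which drops
the queue lock itself):

 /* Old pattern: one detached irq-off region spans all queues. */
 struct request_queue *q = NULL;
 struct request *rq;
 unsigned long flags;
 LIST_HEAD(list);

 local_irq_save(flags);
 while (!list_empty(&list)) {
         rq = list_entry_rq(list.next);
         list_del_init(&rq->queuelist);
         if (rq->q != q) {
                 if (q)
                         dispatch_queue(q);   /* unlocks q->queue_lock */
                 q = rq->q;
                 spin_lock(q->queue_lock);
         }
         /* queue rq on q ... */
 }
 if (q)
         dispatch_queue(q);
 local_irq_restore(flags);

 /* New pattern: irq disabling is tied to each queue lock, no flags needed. */
 while (!list_empty(&list)) {
         rq = list_entry_rq(list.next);
         list_del_init(&rq->queuelist);
         if (rq->q != q) {
                 if (q)
                         dispatch_queue(q);   /* spin_unlock_irq() inside */
                 q = rq->q;
                 spin_lock_irq(q->queue_lock);
         }
         /* queue rq on q ... */
 }
 if (q)
         dispatch_queue(q);

With the new pattern, interrupts are only disabled while a particular
queue's lock is actually held, so a plug list spanning several queues no
longer keeps interrupts off across the whole walk.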
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 656cb6d0
@@ -3629,7 +3629,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 		blk_run_queue_async(q);
 	else
 		__blk_run_queue(q);
-	spin_unlock(q->queue_lock);
+	spin_unlock_irq(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3677,7 +3677,6 @@ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
-	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
@@ -3697,11 +3696,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	q = NULL;
 	depth = 0;
 
-	/*
-	 * Save and disable interrupts here, to avoid doing it for every
-	 * queue lock we have to take.
-	 */
-	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
@@ -3714,7 +3708,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
-			spin_lock(q->queue_lock);
+			spin_lock_irq(q->queue_lock);
 		}
 
 		/*
@@ -3741,8 +3735,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
-
-	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)