Commit 3f1fe673 authored by Jens Axboe, committed by Linus Torvalds

[PATCH] final plug stuff

This is a merge of the two things needed for the plugging as it
stands in 2.5.21. The first fixes the locking to be both clearer and
safe (Andrew repeatedly broke the old version). The second is a set of
changes that allow make_request_fn drivers to utilize plugging; this
is needed by umem and raid, for instance, which do their own private
plugging.
parent d8320130
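
For orientation, here is roughly how a make_request_fn driver in the
umem/raid mould would use the helpers this patch exports. This is a
minimal sketch, not code from the patch: the mydev_* names and the
driver-private bio queuing are assumptions; only blk_plug_device(),
blk_remove_plug() and the unplug_fn hook come from the change below.

/*
 * Hypothetical make_request_fn driver using the exported plug helpers.
 * The driver-private bio list handling is elided.
 */
static int mydev_make_request(request_queue_t *q, struct bio *bio)
{
	spin_lock_irq(q->queue_lock);
	/* ... add the bio to a driver-private list here ... */
	blk_plug_device(q);	/* queue lock held, interrupts off */
	spin_unlock_irq(q->queue_lock);
	return 0;		/* bio accepted, no remap needed */
}

/*
 * Invoked through q->unplug_fn from blk_run_queues(); the stock hook
 * is generic_unplug_device().
 */
static void mydev_unplug(void *data)
{
	request_queue_t *q = data;

	spin_lock_irq(q->queue_lock);
	if (blk_remove_plug(q)) {
		/* plug was armed: start I/O on the queued bios here */
	}
	spin_unlock_irq(q->queue_lock);
}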
@@ -49,6 +49,9 @@ extern int mac_floppy_init(void);
  */
 static kmem_cache_t *request_cachep;
 
+/*
+ * plug management
+ */
 static struct list_head blk_plug_list;
 static spinlock_t blk_plug_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
@@ -794,33 +797,40 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
  * force the transfer to start only after we have put all the requests
  * on the list.
  *
- * This is called with interrupts off and no requests on the queue.
- * (and with the request spinlock acquired)
+ * This is called with interrupts off and no requests on the queue and
+ * with the queue lock held.
  */
 void blk_plug_device(request_queue_t *q)
 {
 	/*
 	 * common case
 	 */
 	if (!elv_queue_empty(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!blk_queue_plugged(q)) {
 		spin_lock(&blk_plug_lock);
 		list_add_tail(&q->plug_list, &blk_plug_list);
 		spin_unlock(&blk_plug_lock);
 	}
 }
 
+/*
+ * remove the queue from the plugged list, if present. called with
+ * queue lock held and interrupts disabled.
+ */
+inline int blk_remove_plug(request_queue_t *q)
+{
+	if (blk_queue_plugged(q)) {
+		spin_lock(&blk_plug_lock);
+		list_del_init(&q->plug_list);
+		spin_unlock(&blk_plug_lock);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * remove the plug and let it rip..
  */
 static inline void __generic_unplug_device(request_queue_t *q)
 {
 	/*
 	 * not plugged
 	 */
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!blk_remove_plug(q))
 		return;
 
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
@@ -848,11 +858,10 @@ static inline void __generic_unplug_device(request_queue_t *q)
 void generic_unplug_device(void *data)
 {
 	request_queue_t *q = data;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
+	spin_lock_irq(q->queue_lock);
 	__generic_unplug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irq(q->queue_lock);
 }
 
 /**
@@ -895,17 +904,9 @@ void blk_stop_queue(request_queue_t *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	/*
-	 * remove from the plugged list, queue must not be called.
-	 */
-	if (test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-		spin_lock(&blk_plug_lock);
-		list_del(&q->plug_list);
-		spin_unlock(&blk_plug_lock);
-	}
-
+	blk_remove_plug(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 }
@@ -941,12 +942,7 @@ void blk_run_queues(void)
 	while (!list_empty(&local_plug_list)) {
 		request_queue_t *q = blk_plug_entry(local_plug_list.next);
 
 		BUG_ON(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags));
 
-		spin_lock_irq(q->queue_lock);
-		list_del(&q->plug_list);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
+		q->unplug_fn(q);
 	}
 }
@@ -1089,6 +1085,7 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock)
 	q->front_merge_fn = ll_front_merge_fn;
 	q->merge_requests_fn = ll_merge_requests_fn;
 	q->prep_rq_fn = NULL;
+	q->unplug_fn = generic_unplug_device;
 	q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
 	q->queue_lock = lock;
@@ -1386,10 +1383,12 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	req = NULL;
 	insert_here = q->queue_head.prev;
 
-	if (blk_queue_empty(q) || barrier) {
+	if (blk_queue_empty(q)) {
 		blk_plug_device(q);
 		goto get_rq;
 	}
+	if (barrier)
+		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
@@ -2011,6 +2010,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
 EXPORT_SYMBOL(generic_make_request);
 EXPORT_SYMBOL(blkdev_release_request);
 EXPORT_SYMBOL(generic_unplug_device);
+EXPORT_SYMBOL(blk_plug_device);
+EXPORT_SYMBOL(blk_remove_plug);
 EXPORT_SYMBOL(blk_attempt_remerge);
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
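The header half of the patch follows. Tying the sketch above together:
blk_init_queue() now installs generic_unplug_device as the default
unplug_fn, so a make_request driver with private plugging overrides the
hook when it sets up its queue. Again a hedged sketch: mydev_* is
hypothetical, and plug_list initialisation is assumed to happen in the
block layer's queue setup.

static request_queue_t mydev_queue;
static spinlock_t mydev_lock = SPIN_LOCK_UNLOCKED;

static void mydev_init_queue(void)
{
	request_queue_t *q = &mydev_queue;

	blk_queue_make_request(q, mydev_make_request);
	q->queue_lock = &mydev_lock;
	q->unplug_fn = mydev_unplug;	/* instead of generic_unplug_device */
}
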
@@ -113,7 +113,7 @@ typedef void (request_fn_proc) (request_queue_t *q);
 typedef request_queue_t * (queue_proc) (kdev_t dev);
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_device_fn) (void *q);
+typedef void (unplug_fn) (void *q);
 
 enum blk_queue_state {
 	Queue_down,
@@ -157,6 +157,7 @@ struct request_queue
 	merge_requests_fn *merge_requests_fn;
 	make_request_fn *make_request_fn;
 	prep_rq_fn *prep_rq_fn;
+	unplug_fn *unplug_fn;
 
 	struct backing_dev_info backing_dev_info;
@@ -206,13 +207,11 @@ struct request_queue
 #define RQ_SCSI_DONE		0xfffe
 #define RQ_SCSI_DISCONNECTING	0xffe0
 
-#define QUEUE_FLAG_PLUGGED	0	/* queue is plugged */
-#define QUEUE_FLAG_CLUSTER	1	/* cluster several segments into 1 */
-#define QUEUE_FLAG_QUEUED	2	/* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED	3	/* queue is stopped */
+#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
+#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
+#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 
-#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
-#define blk_mark_plugged(q)	set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_plugged(q)	!list_empty(&(q)->plug_list)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_empty(q)	elv_queue_empty(q)
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
@@ -292,6 +291,7 @@ extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_put_request(struct request *);
 extern void blk_plug_device(request_queue_t *);
+extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
 extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
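
A final subtlety: blk_queue_plugged() is now list membership rather
than a flag bit, which is why blk_remove_plug() uses list_del_init().
When blk_run_queues() splices the global plug list onto its local
list, each queue is still "plugged" by this definition, and
reinitialising the entry on removal is what makes the test go false.
A hypothetical helper (not patch code) illustrating the invariant:

static void mydev_kick(request_queue_t *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_remove_plug(q))
		BUG_ON(blk_queue_plugged(q));	/* plug_list reinitialised */
	spin_unlock_irqrestore(q->queue_lock, flags);
}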