Commit 86b9d08b authored by Jens Axboe

[PATCH] elv_add_request cleanups

Request insertion in the current tree is a mess. We have all sorts of
variants of *elv_add_request*, and it's not at all clear who does what
and with what locks (or not). This patch cleans it up to be:

o __elv_add_request(queue, request, at_end, plug)
      Core function, requires queue lock to be held

o elv_add_request(queue, request, at_end, plug)
      Like __elv_add_request(), but grabs queue lock

o __elv_add_request_pos(queue, request, position)
      Insert request at a given location, lock must be held
parent 6d94d882
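
For reference, a minimal usage sketch of the new entry points (not part of the patch itself; the my_requeue_* helpers and the surrounding driver are hypothetical):

	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* Caller does NOT hold q->queue_lock: elv_add_request() grabs it.
	 * at_end = 0 inserts at the queue head, plug = 0 leaves the device
	 * unplugged.
	 */
	static void my_requeue_unlocked(request_queue_t *q, struct request *rq)
	{
		elv_add_request(q, rq, 0, 0);
	}

	/* Caller already holds q->queue_lock (e.g. inside its request_fn):
	 * use the __ variant directly, here inserting at the tail (at_end = 1)
	 * and plugging the device (plug = 1).
	 */
	static void my_requeue_locked(request_queue_t *q, struct request *rq)
	{
		__elv_add_request(q, rq, 1, 1);
	}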
@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 		e->elevator_merge_req_fn(q, rq, next);
 }
 
-/*
- * add_request and next_request are required to be supported, naturally
- */
-void __elv_add_request(request_queue_t *q, struct request *rq,
-		       struct list_head *insert_here)
+void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		       int plug)
+{
+	struct list_head *insert = &q->queue_head;
+
+	if (at_end)
+		insert = insert->prev;
+	if (plug)
+		blk_plug_device(q);
+
+	q->elevator.elevator_add_req_fn(q, rq, insert);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		     int plug)
 {
-	q->elevator.elevator_add_req_fn(q, rq, insert_here);
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, at_end, plug);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static inline struct request *__elv_next_request(request_queue_t *q)
@@ -357,6 +371,7 @@ module_init(elevator_global_init);
 EXPORT_SYMBOL(elevator_noop);
 
+EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_remove_request);
...
@@ -639,7 +639,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			blk_queue_end_tag(q, rq);
 
 		rq->flags &= ~REQ_STARTED;
-		elv_add_request(q, rq, 0);
+		__elv_add_request(q, rq, 0, 0);
 	}
 }
@@ -1466,7 +1466,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
 	 * elevator indicated where it wants this request to be
 	 * inserted at elevator_merge time
 	 */
-	__elv_add_request(q, req, insert_here);
+	__elv_add_request_pos(q, req, insert_here);
 }
 
 /*
...
@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 		SCpnt->request->special = (void *) SCpnt;
 		if(blk_rq_tagged(SCpnt->request))
 			blk_queue_end_tag(q, SCpnt->request);
-		_elv_add_request(q, SCpnt->request, 0, 0);
+		__elv_add_request(q, SCpnt->request, 0, 0);
 	}
 
 	/*
@@ -951,7 +951,7 @@ void scsi_request_fn(request_queue_t * q)
 			SCpnt->request->flags |= REQ_SPECIAL;
 			if(blk_rq_tagged(SCpnt->request))
 				blk_queue_end_tag(q, SCpnt->request);
-			_elv_add_request(q, SCpnt->request, 0, 0);
+			__elv_add_request(q, SCpnt->request, 0, 0);
 			break;
 		}
...
@@ -50,22 +50,6 @@ static inline void blkdev_dequeue_request(struct request *req)
 	elv_remove_request(req->q, req);
 }
 
-#define _elv_add_request_core(q, rq, where, plug)			\
-	do {								\
-		if ((plug))						\
-			blk_plug_device((q));				\
-		(q)->elevator.elevator_add_req_fn((q), (rq), (where));	\
-	} while (0)
-
-#define _elv_add_request(q, rq, back, p) do {				      \
-	if ((back))							      \
-		_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
-	else								      \
-		_elv_add_request_core((q), (rq), &(q)->queue_head, (p));     \
-} while (0)
-
-#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
-
 #if defined(MAJOR_NR) || defined(IDE_DRIVER)
 #if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
 #if !defined(IDE_DRIVER)
...
@@ -40,8 +40,8 @@ struct elevator_s
 /*
  * block elevator interface
  */
-extern void __elv_add_request(request_queue_t *, struct request *,
-			      struct list_head *);
+extern void elv_add_request(request_queue_t *, struct request *, int, int);
+extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);
 
+#define __elv_add_request_pos(q, rq, pos)		\
+	(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
+
 /*
  * noop I/O scheduler. always merges, always inserts new request at tail
  */
...