Commit 1cfcda6a authored by Andrew Morton, committed by Linus Torvalds

[PATCH] elevator insertion fixes

From: Nick Piggin <piggin@cyberone.com.au>

This fixes a bug in deadline and AS that causes insert_here to be ignored on
blk_fs_requests. This has been causing problems with SCSI requeueing code.
It makes elevator insertion more correct as advertised wrt insert_here and
REQ_SOFTBARRIER.
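
To illustrate the insertion rule in question, here is a small userspace sketch (editorial, not code from the patch; struct mreq, model_insert() and the MREQ_* flags are made-up stand-ins for the elevator's real data structures): a caller-supplied insert position is always honoured, both barrier flags get the same default-to-tail treatment, and a default position is only picked when the caller passed none.

/* Illustrative sketch only: models the insertion rule with a plain
 * circular list standing in for the elevator's dispatch queue. */
#include <stdio.h>

#define MREQ_FS          0x1	/* models blk_fs_request() being true */
#define MREQ_HARDBARRIER 0x2
#define MREQ_SOFTBARRIER 0x4

struct mlist { struct mlist *prev, *next; };

static void mlist_add(struct mlist *new, struct mlist *head)
{
	/* insert immediately after 'head', like list_add() */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

struct mreq {
	struct mlist queuelist;	/* must stay the first member for the cast below */
	unsigned flags;
	const char *name;
};

static struct mlist dispatch = { &dispatch, &dispatch };

/* A caller-supplied position always wins; hard AND soft barriers only pick
 * the tail default when no position was passed; likewise for non-fs requests. */
static void model_insert(struct mreq *rq, struct mlist *insert_here)
{
	if (rq->flags & (MREQ_HARDBARRIER | MREQ_SOFTBARRIER)) {
		/* a real elevator also drains its sorted queues here,
		 * unless the caller pinned the request to the queue head */
		if (!insert_here)
			insert_here = dispatch.prev;	/* tail */
	}
	if (!(rq->flags & MREQ_FS) && !insert_here)
		insert_here = &dispatch;		/* head */
	if (insert_here)
		mlist_add(&rq->queuelist, insert_here);
	/* otherwise a real elevator would feed it to its sorted lists */
}

int main(void)
{
	struct mreq bar = { .flags = MREQ_FS | MREQ_SOFTBARRIER,
			    .name = "soft barrier, no position given: tail" };
	struct mreq fs  = { .flags = MREQ_FS,
			    .name = "fs request, caller-pinned to head" };

	model_insert(&bar, NULL);	/* default position */
	model_insert(&fs, &dispatch);	/* insert_here honoured */

	for (struct mlist *p = dispatch.next; p != &dispatch; p = p->next)
		printf("%s\n", ((struct mreq *)p)->name);
	return 0;
}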

It also fixes a buglet in the as_requeue code where the request wasn't being
put into the front of the list (in rare cases).
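
For reference, the requeue fix is only a question of which side of the dispatch list head the request lands on. A second self-contained sketch (again made-up userspace code using a kernel-style circular list; add_front()/add_back() stand in for list_add() and list_add_tail()) shows why a requeued request must go to the front so it is reissued before anything else:

/* Illustrative sketch only: front vs. back insertion on a circular list. */
#include <stdio.h>

struct node { struct node *prev, *next; const char *name; };

static void add_front(struct node *n, struct node *head)	/* like list_add() */
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void add_back(struct node *n, struct node *head)	/* like list_add_tail() */
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node head = { &head, &head, "head" };
	struct node a = { 0, 0, "request already queued" };
	struct node b = { 0, 0, "requeued request" };

	add_back(&a, &head);	/* normal submission: back of the dispatch list */
	add_front(&b, &head);	/* requeue: must run before everything else */

	for (struct node *p = head.next; p != &head; p = p->next)
		printf("%s\n", p->name);	/* requeued request prints first */
	return 0;
}
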
parent c12f96f2
@@ -910,12 +910,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	struct as_rq *arq = RQ_DATA(rq);
 	struct as_io_context *aic;
 
-	if (unlikely(!blk_fs_request(rq)))
-		return;
-
-	WARN_ON(blk_fs_request(rq) && arq->state == AS_RQ_NEW);
-
-	if (arq->state != AS_RQ_DISPATCHED)
+	if (unlikely(arq->state != AS_RQ_DISPATCHED))
 		return;
 
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
@@ -1035,7 +1030,7 @@ static void as_remove_request(request_queue_t *q, struct request *rq)
 {
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (unlikely(!blk_fs_request(rq)))
+	if (unlikely(arq->state == AS_RQ_NEW))
 		return;
 
 	if (!arq) {
@@ -1341,9 +1336,9 @@ static void as_requeue_request(request_queue_t *q, struct request *rq)
 			atomic_inc(&arq->io_context->aic->nr_dispatched);
 	} else
 		WARN_ON(blk_fs_request(rq)
-			&& (!(rq->flags & REQ_HARDBARRIER)) );
+			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
 
-	list_add_tail(&rq->queuelist, ad->dispatch);
+	list_add(&rq->queuelist, ad->dispatch);
 
 	/* Stop anticipating - let this request get through */
 	as_antic_stop(ad);
@@ -1358,26 +1353,31 @@ as_insert_request(request_queue_t *q, struct request *rq,
 	struct as_data *ad = q->elevator.elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
-	if (unlikely(rq->flags & REQ_HARDBARRIER)) {
+	if (unlikely(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) {
 		q->last_merge = NULL;
 
-		while (ad->next_arq[REQ_SYNC])
-			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+		if (insert_here != ad->dispatch) {
+			while (ad->next_arq[REQ_SYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
 
-		while (ad->next_arq[REQ_ASYNC])
-			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			while (ad->next_arq[REQ_ASYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+		}
+
+		if (!insert_here)
+			insert_here = ad->dispatch->prev;
 	}
 
 	if (unlikely(!blk_fs_request(rq))) {
 		if (!insert_here)
-			insert_here = ad->dispatch->prev;
+			insert_here = ad->dispatch;
+	}
 
+	if (insert_here) {
 		list_add(&rq->queuelist, insert_here);
 
 		/* Stop anticipating - let this request get through */
-		if (!list_empty(ad->dispatch)
-			&& (ad->antic_status == ANTIC_WAIT_REQ
-			|| ad->antic_status == ANTIC_WAIT_NEXT))
+		if (list_empty(ad->dispatch))
 			as_antic_stop(ad);
 
 		return;
@@ -627,21 +627,25 @@ deadline_insert_request(request_queue_t *q, struct request *rq,
 	struct deadline_data *dd = q->elevator.elevator_data;
 	struct deadline_rq *drq = RQ_DATA(rq);
 
-	if (unlikely(rq->flags & REQ_HARDBARRIER)) {
+	if (unlikely(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) {
 		DL_INVALIDATE_HASH(dd);
 		q->last_merge = NULL;
 
-		while (deadline_dispatch_requests(dd))
-			;
+		if (insert_here != dd->dispatch) {
+			while (deadline_dispatch_requests(dd))
+				;
+		}
 
-		list_add_tail(&rq->queuelist, dd->dispatch);
-		return;
+		if (!insert_here)
+			insert_here = dd->dispatch->prev;
 	}
 
 	if (unlikely(!blk_fs_request(rq))) {
 		if (!insert_here)
-			insert_here = dd->dispatch->prev;
+			insert_here = dd->dispatch;
+	}
 
+	if (insert_here) {
 		list_add(&rq->queuelist, insert_here);
 		return;
 	}
@@ -162,10 +162,10 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
 		       int plug)
 {
-	struct list_head *insert = &q->queue_head;
+	struct list_head *insert = NULL;
 
-	if (at_end)
-		insert = insert->prev;
+	if (!at_end)
+		insert = &q->queue_head;
 
 	if (plug)
 		blk_plug_device(q);