Commit 41b986b6 authored by Nick Piggin's avatar Nick Piggin Committed by Linus Torvalds

[PATCH] fix AS crappy performance

A while ago a user reported that his server's connections/sec rate was
way down due to AS. The server would fork a process which would then read
some tiny bit of random data and hang around for a while doing nothing.
This defeated all of AS's heuristics, and because a new process is a good
anticipation candidate by default, his performance went bad.

The fix at the time was simply to treat new processes as unsuitable for
anticipation. That worked, but unfortunately ls, cat, grep, small programs
starting up, find | xargs, etc. submit only a couple of requests each, and
they really want anticipation to be on.

This patch keeps a simple goodness value per queue based on how successful
anticipation has been for new processes.
parent 5daa5a40
......@@ -99,6 +99,7 @@ struct as_data {
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
struct list_head *dispatch; /* driver dispatch queue */
struct list_head *hash; /* request hash */
unsigned long new_success; /* anticipation success on new proc */
unsigned long current_batch_expires;
unsigned long last_check_fifo[2];
int changed_batch; /* 1: waiting for old batch to end */
......@@ -588,11 +589,18 @@ static void as_antic_stop(struct as_data *ad)
int status = ad->antic_status;
if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
struct as_io_context *aic;
if (status == ANTIC_WAIT_NEXT)
del_timer(&ad->antic_timer);
ad->antic_status = ANTIC_FINISHED;
/* see as_work_handler */
kblockd_schedule_work(&ad->antic_work);
aic = ad->io_context->aic;
if (aic->seek_samples == 0)
/* new process */
ad->new_success = (ad->new_success * 3) / 4 + 256;
}
}
......@@ -608,8 +616,14 @@ static void as_antic_timeout(unsigned long data)
spin_lock_irqsave(q->queue_lock, flags);
if (ad->antic_status == ANTIC_WAIT_REQ
|| ad->antic_status == ANTIC_WAIT_NEXT) {
struct as_io_context *aic;
ad->antic_status = ANTIC_FINISHED;
kblockd_schedule_work(&ad->antic_work);
aic = ad->io_context->aic;
if (aic->seek_samples == 0)
/* new process */
ad->new_success = (ad->new_success * 3) / 4;
}
spin_unlock_irqrestore(q->queue_lock, flags);
}
......@@ -698,10 +712,11 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
return 1;
}
if (aic->seek_samples == 0 || aic->ttime_samples == 0) {
if (ad->new_success < 256 &&
(aic->seek_samples == 0 || aic->ttime_samples == 0)) {
/*
* Process has just started IO so default to not anticipate.
* Maybe should be smarter.
* Process has just started IO and we have a bad history of
* success anticipating on new processes!
*/
return 1;
}
......@@ -1795,6 +1810,9 @@ static int as_init(request_queue_t *q, elevator_t *e)
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;
ad->new_success = 512;
return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment