Commit abc3c744 authored by Shaohua Li, committed by Jens Axboe

cfq-iosched: quantum check tweak

Currently a queue can dispatch at most 4 requests when other queues are present.
This isn't optimal: the device can handle more requests; AHCI, for example, can
handle 31. I understand the limit exists for fairness, but we can apply a tweak:
if the queue still has plenty of its slice left, we can ignore the limit. Testing
shows this boosts my workload (two threads doing random reads of an SSD) from
78 MB/s to 100 MB/s.

Thanks to Corrado and Vivek for their suggestions on the patch.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 9a8c28c8
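
The key idea of the patch is a cheap estimate of whether the queue will exhaust its time slice soon: each request already dispatched is assumed to cost roughly cfq_slice_idle jiffies, and if the projected completion time runs past the end of the slice, the queue is throttled as before. Below is a minimal, self-contained sketch of that arithmetic with hypothetical values; in the kernel the times are in jiffies and time_after() is the wrap-safe macro from <linux/jiffies.h>.

#include <stdio.h>

/* simplified version of the kernel's wrap-safe time_after(a, b) */
static int time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies = 1000;   /* "now" (hypothetical value) */
	unsigned long slice_end = 1020; /* queue's slice expires here */
	unsigned long slice_idle = 8;   /* assumed cost per request */
	unsigned int dispatched = 3;    /* requests already in flight */

	/*
	 * Estimate: if servicing the in-flight requests would run past
	 * the end of the slice, treat the slice as "used soon" and keep
	 * the fair-share dispatch limit; otherwise allow deeper depths.
	 */
	if (time_after(jiffies + slice_idle * dispatched, slice_end))
		printf("slice used soon: throttle at cfq_quantum/2\n");
	else
		printf("slice budget left: allow up to cfq_quantum\n");
	return 0;
}

With these numbers the projection is 1000 + 8 * 3 = 1024, which is past slice_end = 1020, so the queue would be throttled.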
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -19,7 +19,7 @@
  * tunables
  */
 /* max queue in one round of service */
-static const int cfq_quantum = 4;
+static const int cfq_quantum = 8;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
@@ -2197,6 +2197,19 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+				       struct cfq_queue *cfqq)
+{
+	/* the queue hasn't finished any request, can't estimate */
+	if (cfq_cfqq_slice_new(cfqq))
+		return 1;
+	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+		       cfqq->slice_end))
+		return 1;
+
+	return 0;
+}
+
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned int max_dispatch;
@@ -2213,7 +2226,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 		return false;
 
-	max_dispatch = cfqd->cfq_quantum;
+	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
 	if (cfq_class_idle(cfqq))
 		max_dispatch = 1;
 
@@ -2230,13 +2243,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1)
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		max_dispatch = -1;
+		if (cfqd->busy_queues == 1)
+			max_dispatch = -1;
+		else
+			/*
+			 * Normally we start throttling cfqq when cfq_quantum/2
+			 * requests have been dispatched. But we can drive
+			 * deeper queue depths at the beginning of the slice,
+			 * subject to the upper limit of cfq_quantum.
+			 */
+			max_dispatch = cfqd->cfq_quantum;
 	}
 
 	/*
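
The net effect: the default quantum doubles from 4 to 8, but a queue competing with others is still throttled at cfq_quantum/2 = 4 requests unless cfq_slice_used_soon() estimates it has slice budget to spare, in which case it may dispatch up to the full cfq_quantum. Fairness is thus preserved in the steady state, while a queue early in its slice can drive deeper queue depths. As with the other CFQ tunables, quantum remains adjustable at runtime via /sys/block/<device>/queue/iosched/quantum.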