Commit 15c31be4 authored by Jens Axboe

cfq-iosched: fix async queue behaviour

With the cfq_queue hash removal, we inadvertently got rid of the
async queue sharing. This was not intentional, in fact CFQ purposely
shares the async queue per priority level to get good merging for
async writes.

So put some logic in cfq_get_queue() to track the shared queues.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 72d3a38e
...@@ -92,6 +92,8 @@ struct cfq_data { ...@@ -92,6 +92,8 @@ struct cfq_data {
struct cfq_queue *active_queue; struct cfq_queue *active_queue;
struct cfq_io_context *active_cic; struct cfq_io_context *active_cic;
struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
struct timer_list idle_class_timer; struct timer_list idle_class_timer;
sector_t last_position; sector_t last_position;
...@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc) ...@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
} }
static struct cfq_queue * static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
gfp_t gfp_mask) struct task_struct *tsk, gfp_t gfp_mask)
{ {
struct cfq_queue *cfqq, *new_cfqq = NULL; struct cfq_queue *cfqq, *new_cfqq = NULL;
struct cfq_io_context *cic; struct cfq_io_context *cic;
...@@ -1405,12 +1407,35 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, ...@@ -1405,12 +1407,35 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
if (new_cfqq) if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq); kmem_cache_free(cfq_pool, new_cfqq);
atomic_inc(&cfqq->ref);
out: out:
WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
return cfqq; return cfqq;
} }
/*
 * Look up (or allocate) the cfq_queue a task's request should go to.
 *
 * Sync requests always get a per-task queue via cfq_find_alloc_queue().
 * Async requests share one queue per ioprio level, cached in
 * cfqd->async_cfqq[], so that async writes at the same priority level
 * merge well.  The first time an async queue is created for a level it
 * is pinned with an extra reference; cfq_exit_queue() drops that pin.
 *
 * Returns the queue with a reference held for the caller, or NULL if
 * allocation failed (possible when gfp_mask lacks __GFP_WAIT).
 */
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(tsk);
	struct cfq_queue *cfqq = NULL;

	if (!is_sync)
		cfqq = cfqd->async_cfqq[ioprio];

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);

	/*
	 * cfq_find_alloc_queue() may fail without __GFP_WAIT; bail out
	 * here instead of dereferencing a NULL queue below.
	 */
	if (!cfqq)
		return NULL;

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
		atomic_inc(&cfqq->ref);
		cfqd->async_cfqq[ioprio] = cfqq;
	}

	/* caller's reference */
	atomic_inc(&cfqq->ref);
	return cfqq;
}
/* /*
* We drop cfq io contexts lazily, so we may find a dead one. * We drop cfq io contexts lazily, so we may find a dead one.
*/ */
...@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e) ...@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
{ {
struct cfq_data *cfqd = e->elevator_data; struct cfq_data *cfqd = e->elevator_data;
request_queue_t *q = cfqd->queue; request_queue_t *q = cfqd->queue;
int i;
cfq_shutdown_timer_wq(cfqd); cfq_shutdown_timer_wq(cfqd);
...@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e) ...@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
__cfq_exit_single_io_context(cfqd, cic); __cfq_exit_single_io_context(cfqd, cic);
} }
/*
* Put the async queues
*/
for (i = 0; i < IOPRIO_BE_NR; i++)
if (cfqd->async_cfqq[i])
cfq_put_queue(cfqd->async_cfqq[i]);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd); cfq_shutdown_timer_wq(cfqd);
......
...@@ -47,8 +47,10 @@ enum { ...@@ -47,8 +47,10 @@ enum {
#define IOPRIO_NORM (4) #define IOPRIO_NORM (4)
static inline int task_ioprio(struct task_struct *task) static inline int task_ioprio(struct task_struct *task)
{ {
WARN_ON(!ioprio_valid(task->ioprio)); if (ioprio_valid(task->ioprio))
return IOPRIO_PRIO_DATA(task->ioprio); return IOPRIO_PRIO_DATA(task->ioprio);
return IOPRIO_NORM;
} }
static inline int task_nice_ioprio(struct task_struct *task) static inline int task_nice_ioprio(struct task_struct *task)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment