Commit fc46379d authored by Jens Axboe, committed by Jens Axboe

[PATCH] cfq-iosched: kill cfq_exit_lock

cfq_exit_lock currently protects two things:

- The per-ioc rbtree of cfq_io_contexts

- The per-cfqd linked list of cfq_io_contexts

The per-cfqd linked list can be protected by the queue lock, since both
are (by definition) per-cfqd.
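
Concretely, every add or del on cfqd->cic_list then nests under the
queue lock; a minimal sketch of the pattern, lifted from the
cfq_cic_link() hunk below (no new API):

        spin_lock_irq(cfqd->queue->queue_lock);
        list_add(&cic->queue_list, &cfqd->cic_list);
        spin_unlock_irq(cfqd->queue->queue_lock);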

The per-ioc rbtree is used and updated almost exclusively by the process
itself; the only outside user is io priority changing. If we change
priority updating so that it no longer browses the rbtree, we can drop
all locking from the rbtree updates and lookups entirely. So let the
sys_ioprio_set() syscall path merely mark the io_context as having a
changed io priority, and lazily update the private cfq io contexts the
next time io is queued from that context; then this locking can go away
as well (see the sketch below).
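
A minimal sketch of the lazy scheme, stitched together from the hunks
below (abbreviated, not compilable on its own):

        /* fs/ioprio.c: the sys_ioprio_set() path only flags the change */
        if (ioc)
                ioc->ioprio_changed = 1;

        /* cfq-iosched.c, cfq_get_io_context(): picked up when io is next queued */
        smp_read_barrier_depends();
        if (unlikely(ioc->ioprio_changed))
                cfq_ioc_set_ioprio(ioc);        /* clears the flag, reprioritizes each cic */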
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 89850f7e
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -31,8 +31,6 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_KEY_ASYNC           (0)
 
-static DEFINE_SPINLOCK(cfq_exit_lock);
-
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -1084,12 +1082,6 @@ static void cfq_free_io_context(struct io_context *ioc)
                complete(ioc_gone);
 }
 
-static void cfq_trim(struct io_context *ioc)
-{
-       ioc->set_ioprio = NULL;
-       cfq_free_io_context(ioc);
-}
-
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        if (unlikely(cfqq == cfqd->active_queue))
@@ -1101,6 +1093,10 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                                          struct cfq_io_context *cic)
 {
+       list_del_init(&cic->queue_list);
+       smp_wmb();
+       cic->key = NULL;
+
        if (cic->cfqq[ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
                cic->cfqq[ASYNC] = NULL;
@@ -1110,9 +1106,6 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
                cic->cfqq[SYNC] = NULL;
        }
-
-       cic->key = NULL;
-       list_del_init(&cic->queue_list);
 }
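
An aside on the ordering in the hunk above (my reading, not spelled out
in the patch): the cic is unlinked from the per-cfqd list before
cic->key is cleared, with an smp_wmb() in between, so any lookup that
observes key == NULL can assume the cic is already off the list; the new
WARN_ON() in cfq_drop_dead_cic() further down checks exactly that
invariant:

        list_del_init(&cic->queue_list);  /* unlink under the queue lock */
        smp_wmb();                        /* order the unlink before the key store */
        cic->key = NULL;                  /* marks the cic dead for later lookups */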
@@ -1123,27 +1116,23 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
        struct cfq_data *cfqd = cic->key;
 
-       WARN_ON(!irqs_disabled());
-
        if (cfqd) {
                request_queue_t *q = cfqd->queue;
 
-               spin_lock(q->queue_lock);
+               spin_lock_irq(q->queue_lock);
                __cfq_exit_single_io_context(cfqd, cic);
-               spin_unlock(q->queue_lock);
+               spin_unlock_irq(q->queue_lock);
        }
 }
 
 static void cfq_exit_io_context(struct io_context *ioc)
 {
        struct cfq_io_context *__cic;
-       unsigned long flags;
        struct rb_node *n;
 
        /*
         * put the reference this task is holding to the various queues
         */
-       spin_lock_irqsave(&cfq_exit_lock, flags);
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
@@ -1152,8 +1141,6 @@ static void cfq_exit_io_context(struct io_context *ioc)
                cfq_exit_single_io_context(__cic);
                n = rb_next(n);
        }
-
-       spin_unlock_irqrestore(&cfq_exit_lock, flags);
 }
 
 static struct cfq_io_context *
@@ -1248,15 +1235,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
        spin_unlock(cfqd->queue->queue_lock);
 }
 
-/*
- * callback from sys_ioprio_set, irqs are disabled
- */
-static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+static void cfq_ioc_set_ioprio(struct io_context *ioc)
 {
        struct cfq_io_context *cic;
        struct rb_node *n;
 
-       spin_lock(&cfq_exit_lock);
+       ioc->ioprio_changed = 0;
 
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
@@ -1265,10 +1249,6 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
                changed_ioprio(cic);
                n = rb_next(n);
        }
-
-       spin_unlock(&cfq_exit_lock);
-
-       return 0;
 }
 
 static struct cfq_queue *
@@ -1336,10 +1316,8 @@ cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
-       spin_lock(&cfq_exit_lock);
+       WARN_ON(!list_empty(&cic->queue_list));
        rb_erase(&cic->rb_node, &ioc->cic_root);
-       list_del_init(&cic->queue_list);
-       spin_unlock(&cfq_exit_lock);
        kmem_cache_free(cfq_ioc_pool, cic);
        atomic_dec(&ioc_count);
 }
@@ -1385,7 +1363,6 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
        cic->ioc = ioc;
        cic->key = cfqd;
 
-       ioc->set_ioprio = cfq_ioc_set_ioprio;
 restart:
        parent = NULL;
        p = &ioc->cic_root.rb_node;
@@ -1407,11 +1384,12 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                BUG();
        }
 
-       spin_lock(&cfq_exit_lock);
        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, &ioc->cic_root);
+
+       spin_lock_irq(cfqd->queue->queue_lock);
        list_add(&cic->queue_list, &cfqd->cic_list);
-       spin_unlock(&cfq_exit_lock);
+       spin_unlock_irq(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1441,6 +1419,10 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                cfq_cic_link(cfqd, ioc, cic);
 out:
+       smp_read_barrier_depends();
+       if (unlikely(ioc->ioprio_changed))
+               cfq_ioc_set_ioprio(ioc);
+
        return cic;
 err:
        put_io_context(ioc);
@@ -1945,7 +1927,6 @@ static void cfq_exit_queue(elevator_t *e)
 
        cfq_shutdown_timer_wq(cfqd);
 
-       spin_lock(&cfq_exit_lock);
        spin_lock_irq(q->queue_lock);
 
        if (cfqd->active_queue)
@@ -1960,7 +1941,6 @@ static void cfq_exit_queue(elevator_t *e)
        }
 
        spin_unlock_irq(q->queue_lock);
-       spin_unlock(&cfq_exit_lock);
 
        cfq_shutdown_timer_wq(cfqd);
@@ -2149,7 +2129,7 @@ static struct elevator_type iosched_cfq = {
                .elevator_may_queue_fn =        cfq_may_queue,
                .elevator_init_fn =             cfq_init_queue,
                .elevator_exit_fn =             cfq_exit_queue,
-               .trim =                         cfq_trim,
+               .trim =                         cfq_free_io_context,
        },
        .elevator_attrs =       cfq_attrs,
        .elevator_name =        "cfq",
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3654,7 +3654,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
        if (ret) {
                atomic_set(&ret->refcount, 1);
                ret->task = current;
-               ret->set_ioprio = NULL;
+               ret->ioprio_changed = 0;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -47,8 +47,8 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
        /* see wmb() in current_io_context() */
        smp_read_barrier_depends();
 
-       if (ioc && ioc->set_ioprio)
-               ioc->set_ioprio(ioc, ioprio);
+       if (ioc)
+               ioc->ioprio_changed = 1;
 
        task_unlock(task);
        return 0;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,7 +90,7 @@ struct io_context {
        atomic_t refcount;
        struct task_struct *task;
 
-       int (*set_ioprio)(struct io_context *, unsigned int);
+       unsigned int ioprio_changed;
 
        /*
         * For request batching