Commit 216284c3 authored by Tejun Heo, committed by Jens Axboe

block, cfq: fix race condition in cic creation path and tighten locking

cfq_get_io_context() would fail if multiple tasks raced to insert cics
for the same association.  This patch restructures
cfq_get_io_context() so that the slow-path insertion race is handled
properly.

Note that with this restructuring cfq_get_io_context() is now called
under queue_lock and performs both the ioc and cfqd insertions while
holding both the ioc and queue locks.  This is part of the on-going
locking tightening and will be used to simplify the synchronization rules.
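To make the retry pattern concrete, the following is a minimal userspace
sketch of the same idea, assuming a single pthread mutex standing in for
queue_lock/ioc->lock and a linked list standing in for the per-ioc radix
tree; struct cic, cic_get() and cic_create() below are illustrative
stand-ins, not the kernel functions.

/*
 * Illustrative sketch (not kernel code): look up under the lock, drop
 * the lock to allocate on a miss, and treat losing the insertion race
 * as success before retrying the lookup.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct cic {				/* stands in for cfq_io_context */
	int key;			/* stands in for q->id */
	struct cic *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cic *table;		/* stands in for ioc->radix_root */

/* caller must hold table_lock; returns NULL on a miss */
static struct cic *cic_lookup(int key)
{
	struct cic *c;

	for (c = table; c; c = c->next)
		if (c->key == key)
			return c;
	return NULL;
}

/* slow path: allocate unlocked, link under the lock, tolerate the race */
static int cic_create(int key)
{
	struct cic *c = malloc(sizeof(*c));

	if (!c)
		return -ENOMEM;
	c->key = key;

	pthread_mutex_lock(&table_lock);
	if (cic_lookup(key)) {
		free(c);		/* someone else already did it */
	} else {
		c->next = table;
		table = c;
	}
	pthread_mutex_unlock(&table_lock);
	return 0;
}

/* fast path under the lock; loops through the slow path on a miss */
static struct cic *cic_get(int key)
{
	struct cic *c;

	pthread_mutex_lock(&table_lock);
	while (!(c = cic_lookup(key))) {
		pthread_mutex_unlock(&table_lock);	/* unlock to allocate */
		if (cic_create(key))
			return NULL;			/* allocation failed */
		pthread_mutex_lock(&table_lock);	/* re-acquire and retry */
	}
	pthread_mutex_unlock(&table_lock);
	return c;
}

As in the patch, losing the insertion race is not treated as an error:
the loser drops its copy and the retried fast-path lookup finds the
winner's entry.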
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dc86900e
block/cfq-iosched.c

@@ -2908,13 +2908,10 @@ static void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
-	unsigned long flags;
 
 	if (unlikely(!cfqd))
 		return;
 
-	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
 	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
@@ -2929,8 +2926,6 @@ static void changed_ioprio(struct cfq_io_context *cic)
 	cfqq = cic->cfqq[BLK_RW_SYNC];
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
-
-	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2958,7 +2953,6 @@ static void changed_cgroup(struct cfq_io_context *cic)
 {
 	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
-	unsigned long flags;
 	struct request_queue *q;
 
 	if (unlikely(!cfqd))
@@ -2966,8 +2960,6 @@ static void changed_cgroup(struct cfq_io_context *cic)
 
 	q = cfqd->queue;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-
 	if (sync_cfqq) {
 		/*
 		 * Drop reference to sync queue. A new sync queue will be
@@ -2977,8 +2969,6 @@ static void changed_cgroup(struct cfq_io_context *cic)
 		cic_set_cfqq(cic, NULL, 1);
 		cfq_put_queue(sync_cfqq);
 	}
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
 
@@ -3142,16 +3132,31 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	return cic;
 }
 
-/*
- * Add cic into ioc, using cfqd as the search key. This enables us to lookup
- * the process specific cfq io context when entered from the block layer.
- * Also adds the cic to a per-cfqd list, used when this queue is removed.
+/**
+ * cfq_create_cic - create and link a cfq_io_context
+ * @cfqd: cfqd of interest
+ * @gfp_mask: allocation mask
+ *
+ * Make sure cfq_io_context linking %current->io_context and @cfqd exists.
+ * If ioc and/or cic doesn't exist, they will be created using @gfp_mask.
  */
-static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
-			struct cfq_io_context *cic, gfp_t gfp_mask)
+static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	unsigned long flags;
-	int ret;
+	struct request_queue *q = cfqd->queue;
+	struct cfq_io_context *cic = NULL;
+	struct io_context *ioc;
+	int ret = -ENOMEM;
+
+	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	/* allocate stuff */
+	ioc = current_io_context(gfp_mask, q->node);
+	if (!ioc)
+		goto out;
+
+	cic = cfq_alloc_io_context(cfqd, gfp_mask);
+	if (!cic)
+		goto out;
 
 	ret = radix_tree_preload(gfp_mask);
 	if (ret)
@@ -3161,53 +3166,72 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	cic->key = cfqd;
 	cic->q = cfqd->queue;
 
-	spin_lock_irqsave(&ioc->lock, flags);
-	ret = radix_tree_insert(&ioc->radix_root, cfqd->queue->id, cic);
-	if (!ret)
-		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	radix_tree_preload_end();
-
-	if (!ret) {
-		spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-		list_add(&cic->queue_list, &cfqd->cic_list);
-		spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-	}
+	/* lock both q and ioc and try to link @cic */
+	spin_lock_irq(q->queue_lock);
+	spin_lock(&ioc->lock);
+
+	ret = radix_tree_insert(&ioc->radix_root, q->id, cic);
+	if (likely(!ret)) {
+		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		list_add(&cic->queue_list, &cfqd->cic_list);
+		cic = NULL;
+	} else if (ret == -EEXIST) {
+		/* someone else already did it */
+		ret = 0;
+	}
+
+	spin_unlock(&ioc->lock);
+	spin_unlock_irq(q->queue_lock);
+
+	radix_tree_preload_end();
 out:
 	if (ret)
 		printk(KERN_ERR "cfq: cic link failed!\n");
+	if (cic)
+		cfq_cic_free(cic);
 	return ret;
 }
 
-/*
- * Setup general io context and cfq io context. There can be several cfq
- * io contexts per general io context, if this process is doing io to more
- * than one device managed by cfq.
+/**
+ * cfq_get_io_context - acquire cfq_io_context and bump refcnt on io_context
+ * @cfqd: cfqd to setup cic for
+ * @gfp_mask: allocation mask
+ *
+ * Return cfq_io_context associating @cfqd and %current->io_context and
+ * bump refcnt on io_context.  If ioc or cic doesn't exist, they're created
+ * using @gfp_mask.
+ *
+ * Must be called under queue_lock which may be released and re-acquired.
+ * This function also may sleep depending on @gfp_mask.
  */
 static struct cfq_io_context *
 cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	struct io_context *ioc = NULL;
+	struct request_queue *q = cfqd->queue;
 	struct cfq_io_context *cic = NULL;
+	struct io_context *ioc;
+	int err;
+
+	lockdep_assert_held(q->queue_lock);
+
+	while (true) {
+		/* fast path */
+		ioc = current->io_context;
+		if (likely(ioc)) {
+			cic = cfq_cic_lookup(cfqd, ioc);
+			if (likely(cic))
+				break;
+		}
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
-
-	ioc = current_io_context(gfp_mask, cfqd->queue->node);
-	if (!ioc)
-		goto err;
-
-	cic = cfq_cic_lookup(cfqd, ioc);
-	if (cic)
-		goto out;
-
-	cic = cfq_alloc_io_context(cfqd, gfp_mask);
-	if (cic == NULL)
-		goto err;
-
-	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
-		goto err;
-out:
+		/* slow path - unlock, create missing ones and retry */
+		spin_unlock_irq(q->queue_lock);
+		err = cfq_create_cic(cfqd, gfp_mask);
+		spin_lock_irq(q->queue_lock);
+		if (err)
+			return NULL;
+	}
+
+	/* bump @ioc's refcnt and handle changed notifications */
 	get_io_context(ioc);
 
 	if (unlikely(cic->changed)) {
@@ -3220,10 +3244,6 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	}
 
 	return cic;
-err:
-	if (cic)
-		cfq_cic_free(cic);
-	return NULL;
 }
 
 static void
@@ -3759,14 +3779,11 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
-	unsigned long flags;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
+	spin_lock_irq(q->queue_lock);
 	cic = cfq_get_io_context(cfqd, gfp_mask);
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
 	if (!cic)
 		goto queue_fail;
 
@@ -3802,12 +3819,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	rq->elevator_private[0] = cic;
 	rq->elevator_private[1] = cfqq;
 	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irq(q->queue_lock);
 	return 0;
 
 queue_fail:
 	cfq_schedule_dispatch(cfqd);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	spin_unlock_irq(q->queue_lock);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
 }