Commit 69eea5a4 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Four small fixes for this cycle:

   - follow-up fix from Neil for a fix that went in before -rc2, ensuring
     that we always see the full per-task bio_list (a standalone sketch of
     the two-list scheme follows the commit metadata below).

   - fix for blk-mq-sched from me that ensures that we retain similar
     direct-to-issue behavior when running the queue (a toy model of the
     may_sleep plumbing follows the blk-mq hunks below).

   - fix from Sagi for a potential NULL pointer dereference in blk-mq on
     spurious CPU unplug (a toy model of the NULL-slot guard follows the
     tagset hunk below).

   - a memory leak fix in writeback from Tahsin, fixing a case where
     removal of a mounted device can leak a struct wb_writeback_work (a
     sketch of the completion/auto_free pattern follows the writeback
     hunks below)"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq-sched: don't run the queue async from blk_mq_try_issue_directly()
  writeback: fix memory leak in wb_queue_work()
  blk-mq: Fix tagset reinit in the presence of cpu hot-unplug
  blk: Ensure users for current->bio_list can see the full list.
parents 95422dec 9c621104
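
The common thread in Neil's changes below is that current->bio_list turns
from a pointer to a single bio_list into a pointer to an array of two:
entry [0] holds bios submitted by the current make_request_fn, entry [1]
holds bios submitted earlier but not yet processed, so any "is anything
pending?" test has to check both lists. A minimal standalone sketch of
that invariant; this is not kernel code, bio_list is re-created here with
just the head/tail pair it has in include/linux/bio.h:

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel types (illustration only). */
    struct bio {
            struct bio *bi_next;
            int id;
    };

    struct bio_list {
            struct bio *head, *tail;
    };

    static void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; }
    static int bio_list_empty(const struct bio_list *bl) { return bl->head == NULL; }

    static void bio_list_add(struct bio_list *bl, struct bio *bio)
    {
            bio->bi_next = NULL;
            if (bl->tail)
                    bl->tail->bi_next = bio;
            else
                    bl->head = bio;
            bl->tail = bio;
    }

    /*
     * After this merge, current->bio_list style users must test BOTH
     * lists: [0] holds bios submitted by the current make_request_fn,
     * [1] holds bios submitted earlier but not yet processed.
     */
    static int any_bios_pending(struct bio_list bio_list[2])
    {
            return !bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1]);
    }

    int main(void)
    {
            struct bio_list pending[2];
            struct bio b = { .bi_next = NULL, .id = 1 };

            bio_list_init(&pending[0]);
            bio_list_init(&pending[1]);
            bio_list_add(&pending[1], &b);  /* an "older", unprocessed bio */

            printf("pending? %d\n", any_bios_pending(pending));     /* prints 1 */
            return 0;
    }

This is exactly the shape of the checks added to bio_alloc_bioset() and
raid10's wait_barrier() in the hunks below.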
block/bio.c
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);
 
-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;
 
-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;
 
 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		 * we retry with the original gfp_flags.
 		 */
-		if (current->bio_list && !bio_list_empty(current->bio_list))
+		if (current->bio_list &&
+		    (!bio_list_empty(&current->bio_list[0]) ||
+		     !bio_list_empty(&current->bio_list[1])))
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
block/blk-core.c
@@ -1973,7 +1973,14 @@ generic_make_request_checks(struct bio *bio)
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}
@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
 		if (likely(blk_queue_enter(q, false) == 0)) {
-			struct bio_list hold;
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
-			hold = bio_list_on_stack;
-			bio_list_init(&bio_list_on_stack);
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
 			 */
 			bio_list_init(&lower);
 			bio_list_init(&same);
-			while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 				if (q == bdev_get_queue(bio->bi_bdev))
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
 			/* now assemble so we handle the lowest level first */
-			bio_list_merge(&bio_list_on_stack, &lower);
-			bio_list_merge(&bio_list_on_stack, &same);
-			bio_list_merge(&bio_list_on_stack, &hold);
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
 			bio_io_error(bio);
 		}
-		bio = bio_list_pop(current->bio_list);
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
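
The reworked loop above preserves a depth-first submission order: bios
for lower devices are handled before bios for the same queue, which are
handled before whatever was already pending (now parked in
bio_list_on_stack[1] rather than the removed on-stack "hold" variable).
The sketch below shows why the three bio_list_merge() calls produce that
pop order; it re-creates the kernel's append-style merge on a plain
singly linked list and is illustrative only:

    #include <stdio.h>

    struct node { struct node *next; const char *tag; };
    struct list { struct node *head, *tail; };

    static void list_init(struct list *l) { l->head = l->tail = NULL; }

    static void list_add(struct list *l, struct node *n)
    {
            n->next = NULL;
            if (l->tail)
                    l->tail->next = n;
            else
                    l->head = n;
            l->tail = n;
    }

    /* Same semantics as the kernel's bio_list_merge(): append src to dst. */
    static void list_merge(struct list *dst, struct list *src)
    {
            if (!src->head)
                    return;
            if (dst->tail)
                    dst->tail->next = src->head;
            else
                    dst->head = src->head;
            dst->tail = src->tail;
    }

    static struct node *list_pop(struct list *l)
    {
            struct node *n = l->head;
            if (n) {
                    l->head = n->next;
                    if (!l->head)
                            l->tail = NULL;
                    n->next = NULL;
            }
            return n;
    }

    int main(void)
    {
            struct list stack0, lower, same, hold;
            struct node lo = { NULL, "lower-device bio" };
            struct node sm = { NULL, "same-queue bio" };
            struct node hd = { NULL, "previously pending bio" };
            struct node *n;

            list_init(&stack0); list_init(&lower);
            list_init(&same);   list_init(&hold);
            list_add(&lower, &lo);
            list_add(&same, &sm);
            list_add(&hold, &hd);   /* plays the role of bio_list_on_stack[1] */

            /* now assemble so we handle the lowest level first */
            list_merge(&stack0, &lower);
            list_merge(&stack0, &same);
            list_merge(&stack0, &hold);

            while ((n = list_pop(&stack0)))
                    printf("%s\n", n->tag); /* lower, then same, then pending */
            return 0;
    }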
block/blk-mq-tag.c
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];
 
+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->static_rqs[j])
 				continue;
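
The guard above matters because after a spurious CPU unplug some
set->tags[i] slots can legitimately be NULL, and dereferencing
tags->nr_tags through such a slot was the crash. A toy model of the
defensive iteration; the types and values here are made up for
illustration:

    #include <stdio.h>
    #include <stddef.h>

    struct tags { int nr_tags; };

    struct tag_set {
            int nr_hw_queues;
            struct tags *tags[4];   /* some entries may be NULL after unplug */
    };

    static void reinit_tagset(struct tag_set *set)
    {
            for (int i = 0; i < set->nr_hw_queues; i++) {
                    struct tags *tags = set->tags[i];

                    if (!tags)      /* the fix: skip unmapped hw queues */
                            continue;
                    printf("queue %d: %d tags\n", i, tags->nr_tags);
            }
    }

    int main(void)
    {
            struct tags t = { 32 };
            struct tag_set set = { 3, { &t, NULL, &t, NULL } };

            reinit_tagset(&set);    /* no NULL dereference on queue 1 */
            return 0;
    }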
block/blk-mq.c
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
 /*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;
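
The two call sites above encode the constraint behind the new may_sleep
parameter: sleeping is forbidden under rcu_read_lock(), so the fallback
insert must stay non-blocking there, while the SRCU path may block.
Either way the queue is now run synchronously rather than punted to an
async run. A toy model of that plumbing; none of these functions are the
kernel API, the names are made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static void sched_insert(bool async, bool may_sleep)
    {
            if (async)
                    printf("insert: kick async queue run (extra latency)\n");
            else if (may_sleep)
                    printf("insert: run queue synchronously, blocking allowed\n");
            else
                    printf("insert: run queue synchronously, non-blocking\n");
    }

    static void try_issue_directly(bool hw_busy, bool may_sleep)
    {
            if (!hw_busy) {
                    printf("issued directly to hardware\n");
                    return;
            }
            /* Fallback: keep it synchronous; honor the caller's context. */
            sched_insert(false /* async */, may_sleep);
    }

    int main(void)
    {
            try_issue_directly(true, false);  /* called under rcu_read_lock() */
            try_issue_directly(true, true);   /* called under srcu_read_lock() */
            return 0;
    }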
drivers/md/dm.c
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
 	struct bio_list list;
 	struct bio *bio;
+	int i;
 
 	INIT_LIST_HEAD(&o->cb.list);
 
 	if (unlikely(!current->bio_list))
 		return;
 
-	list = *current->bio_list;
-	bio_list_init(current->bio_list);
-
-	while ((bio = bio_list_pop(&list))) {
-		struct bio_set *bs = bio->bi_pool;
-		if (unlikely(!bs) || bs == fs_bio_set) {
-			bio_list_add(current->bio_list, bio);
-			continue;
+	for (i = 0; i < 2; i++) {
+		list = current->bio_list[i];
+		bio_list_init(&current->bio_list[i]);
+
+		while ((bio = bio_list_pop(&list))) {
+			struct bio_set *bs = bio->bi_pool;
+			if (unlikely(!bs) || bs == fs_bio_set) {
+				bio_list_add(&current->bio_list[i], bio);
+				continue;
+			}
+
+			spin_lock(&bs->rescue_lock);
+			bio_list_add(&bs->rescue_list, bio);
+			queue_work(bs->rescue_workqueue, &bs->rescue_work);
+			spin_unlock(&bs->rescue_lock);
 		}
-
-		spin_lock(&bs->rescue_lock);
-		bio_list_add(&bs->rescue_list, bio);
-		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-		spin_unlock(&bs->rescue_lock);
 	}
 }
drivers/md/raid10.c
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
 				    !conf->barrier ||
 				    (atomic_read(&conf->nr_pending) &&
 				     current->bio_list &&
-				     !bio_list_empty(current->bio_list)),
+				     (!bio_list_empty(&current->bio_list[0]) ||
+				      !bio_list_empty(&current->bio_list[1]))),
 				    conf->resync_lock);
 		conf->nr_waiting--;
 	if (!conf->nr_waiting)
fs/fs-writeback.c
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
 	spin_unlock_bh(&wb->work_lock);
 }
 
+static void finish_writeback_work(struct bdi_writeback *wb,
+				  struct wb_writeback_work *work)
+{
+	struct wb_completion *done = work->done;
+
+	if (work->auto_free)
+		kfree(work);
+	if (done && atomic_dec_and_test(&done->cnt))
+		wake_up_all(&wb->bdi->wb_waitq);
+}
+
 static void wb_queue_work(struct bdi_writeback *wb,
 			  struct wb_writeback_work *work)
 {
 	trace_writeback_queue(wb, work);
 
-	spin_lock_bh(&wb->work_lock);
-	if (!test_bit(WB_registered, &wb->state))
-		goto out_unlock;
 	if (work->done)
 		atomic_inc(&work->done->cnt);
-	list_add_tail(&work->list, &wb->work_list);
-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
-out_unlock:
+
+	spin_lock_bh(&wb->work_lock);
+
+	if (test_bit(WB_registered, &wb->state)) {
+		list_add_tail(&work->list, &wb->work_list);
+		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+	} else
+		finish_writeback_work(wb, work);
+
 	spin_unlock_bh(&wb->work_lock);
 }
@@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 	set_bit(WB_writeback_running, &wb->state);
 	while ((work = get_next_work_item(wb)) != NULL) {
-		struct wb_completion *done = work->done;
-
 		trace_writeback_exec(wb, work);
 		wrote += wb_writeback(wb, work);
-
-		if (work->auto_free)
-			kfree(work);
-		if (done && atomic_dec_and_test(&done->cnt))
-			wake_up_all(&wb->bdi->wb_waitq);
+		finish_writeback_work(wb, work);
 	}
 
 	/*
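
The factored-out finish_writeback_work() closes the leak: every path out
of wb_queue_work() now either queues the work or releases it, dropping
the completion reference and kfree()ing auto_free work, where the old
code silently returned on an unregistered wb. A small userspace model of
that ownership rule, using C11 atomics in place of the kernel's atomic_t
(illustrative only; names are made up):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct completion { atomic_int cnt; };

    struct work {
            struct completion *done;        /* optional completion to signal */
            bool auto_free;                 /* freed by whoever finishes it */
    };

    static void finish_work(struct work *w)
    {
            struct completion *done = w->done;      /* save before freeing */

            if (w->auto_free)
                    free(w);
            if (done && atomic_fetch_sub(&done->cnt, 1) == 1)
                    printf("completion reached zero: wake waiters\n");
    }

    /* Every path must either enqueue the work or finish it. */
    static void queue_work_item(bool wb_registered, struct work *w)
    {
            if (w->done)
                    atomic_fetch_add(&w->done->cnt, 1);

            if (wb_registered) {
                    printf("queued for the writeback worker\n");
                    /* ...the worker later calls finish_work(w)... */
            } else {
                    finish_work(w); /* the pre-fix code returned here, leaking w */
            }
    }

    int main(void)
    {
            static struct completion done = { 0 };
            struct work *w = malloc(sizeof(*w));

            w->done = &done;
            w->auto_free = true;
            queue_work_item(false, w);      /* bdi already unregistered */
            return 0;
    }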