Commit 18741986 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: rework flush sequencing logic

Switch to using a preallocated flush_rq for blk-mq similar to what's done
with the old request path.  This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by
some drivers.  To make life easier we also switch to dynamic allocation
of ->flush_rq for the old path.

This effectively reverts most of

    "blk-mq: fix for flush deadlock"

and

    "blk-mq: Don't reserve a tag for flush request"
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ce2c350b
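
Before the hunks, the gist of the rework in isolation: the queue owns a single preallocated flush request, and kicking a flush re-initializes that request and borrows the tag of the first pending request rather than allocating a fresh request (and tag) behind the driver's back. The sketch below is a minimal, self-contained userspace model of that idea only; every name in it (model_request, model_queue, model_kick_flush) is a simplified stand-in, not the kernel's structures or API.

/*
 * Minimal userspace model of the reworked flush path: the queue owns one
 * preallocated flush request; kicking a flush re-initializes it and borrows
 * the tag of the first pending request instead of allocating a new request.
 * All names here are simplified stand-ins, not the kernel's real structures.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_request {
        int tag;                                /* driver-visible tag */
        void (*end_io)(struct model_request *rq, int error);
};

struct model_queue {
        struct model_request *flush_rq;         /* allocated once at queue init */
};

static void model_flush_end_io(struct model_request *rq, int error)
{
        printf("flush completed: tag=%d error=%d\n", rq->tag, error);
}

/* Mirrors the shape of the new blk_kick_flush(): no allocation, tag reuse. */
static void model_kick_flush(struct model_queue *q, struct model_request *first_rq)
{
        memset(q->flush_rq, 0, sizeof(*q->flush_rq));   /* stands in for blk_mq_rq_init() */
        q->flush_rq->tag = first_rq->tag;               /* stays inside the driver's tag range */
        q->flush_rq->end_io = model_flush_end_io;
}

int main(void)
{
        struct model_queue q = { .flush_rq = calloc(1, sizeof(struct model_request)) };
        struct model_request first = { .tag = 42 };     /* first request waiting on the flush */

        if (!q.flush_rq)
                return 1;
        model_kick_flush(&q, &first);
        q.flush_rq->end_io(q.flush_rq, 0);              /* driver would call this on completion */
        free(q.flush_rq);
        return 0;
}

Reusing first_rq's tag is exactly what the commit message describes: drivers see a tag inside the range they configured, and ->rq_disk can be taken from the request being flushed.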
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
+	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!uninit_q->flush_rq)
+		goto out_cleanup_queue;
+
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		blk_cleanup_queue(uninit_q);
+		goto out_free_flush_rq;
 
 	return q;
+
+out_free_flush_rq:
+	kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+	blk_cleanup_queue(uninit_q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+		return blk_mq_alloc_request(q, rw, gfp_mask);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
......
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
 {
 	struct request *rq;
 
-	rq = container_of(work, struct request, mq_flush_data);
+	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
 	blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
 {
-	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+	if (rq->q->mq_ops) {
+		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+		return false;
+	} else {
+		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		return true;
+	}
 }
 
 /**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		if (q->mq_ops)
-			blk_mq_flush_data_insert(rq);
-		else {
-			list_add(&rq->queuelist, &q->queue_head);
-			queued = true;
-		}
+		queued = blk_flush_queue_rq(rq);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 	}
 
 	kicked = blk_kick_flush(q);
-	/* blk_mq_run_flush will run queue */
-	if (q->mq_ops)
-		return queued;
 	return kicked | queued;
 }
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops) {
-		blk_mq_free_request(flush_rq);
+	if (q->mq_ops)
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-	}
+
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,48 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * kblockd.
 	 */
 	if (queued || q->flush_queue_delayed) {
-		if (!q->mq_ops)
-			blk_run_queue_async(q);
-		else
-		/*
-		 * This can be optimized to only run queues with requests
-		 * queued if necessary.
-		 */
-			blk_mq_run_queues(q, true);
+		WARN_ON(q->mq_ops);
+		blk_run_queue_async(q);
 	}
 	q->flush_queue_delayed = 0;
 	if (q->mq_ops)
 		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-	struct request_queue *q;
-	struct request *rq;
-
-	q = container_of(work, struct request_queue, mq_flush_work);
-	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC, false);
-	rq->cmd_type = REQ_TYPE_FS;
-	rq->end_io = flush_end_io;
-	blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-	kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -339,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
 	 * different from running_idx, which means flush is in flight.
 	 */
 	q->flush_pending_idx ^= 1;
+
 	if (q->mq_ops) {
-		mq_run_flush(q);
-		return true;
+		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		blk_mq_rq_init(hctx, q->flush_rq);
+		q->flush_rq->mq_ctx = ctx;
+
+		/*
+		 * Reuse the tag value from the first waiting request,
+		 * with blk-mq the tag is generated during request
+		 * allocation and drivers can rely on it being inside
+		 * the range they asked for.
+		 */
+		q->flush_rq->tag = first_rq->tag;
+	} else {
+		blk_rq_init(q, q->flush_rq);
 	}
 
-	blk_rq_init(q, &q->flush_rq);
-	q->flush_rq.cmd_type = REQ_TYPE_FS;
-	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq.rq_disk = first_rq->rq_disk;
-	q->flush_rq.end_io = flush_end_io;
+	q->flush_rq->cmd_type = REQ_TYPE_FS;
+	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq->rq_disk = first_rq->rq_disk;
+	q->flush_rq->end_io = flush_end_io;
 
-	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-	return true;
+	return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
@@ -407,10 +382,7 @@ void blk_insert_flush(struct request *rq)
 	/*
 	 * @policy now records what operations need to be done.  Adjust
 	 * REQ_FLUSH and FUA for the driver.
-	 * We keep REQ_FLUSH for mq to track flush requests. For !FUA,
-	 * we never dispatch the request directly.
 	 */
-	if (rq->cmd_flags & REQ_FUA)
-		rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_FLUSH;
 	if (!(fflags & REQ_FUA))
 		rq->cmd_flags &= ~REQ_FUA;
@@ -560,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
 void blk_mq_init_flush(struct request_queue *q)
 {
 	spin_lock_init(&q->mq_flush_lock);
-	INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }
@@ -194,27 +194,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved,
-					      int rw)
+					      gfp_t gfp, bool reserved)
 {
-	struct request *req;
-	bool is_flush = false;
-	/*
-	 * flush need allocate a request, leave at least one request for
-	 * non-flush IO to avoid deadlock
-	 */
-	if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-		if (atomic_inc_return(&hctx->pending_flush) >=
-		    hctx->queue_depth - hctx->reserved_tags - 1) {
-			atomic_dec(&hctx->pending_flush);
-			return NULL;
-		}
-		is_flush = true;
-	}
-	req = blk_mq_alloc_rq(hctx, gfp, reserved);
-	if (!req && is_flush)
-		atomic_dec(&hctx->pending_flush);
-	return req;
+	return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
@@ -227,7 +209,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
 			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
@@ -244,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-		gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
@@ -276,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	blk_rq_init(hctx->queue, rq);
@@ -290,9 +271,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		atomic_dec(&hctx->pending_flush);
-
 	blk_mq_rq_init(hctx, rq);
 	blk_mq_put_tag(hctx->tags, tag);
@@ -946,14 +924,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
-		rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+				false);
 		ctx = rq->mq_ctx;
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	}
@@ -1230,9 +1208,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->queue_num = i;
 		hctx->flags = reg->flags;
 		hctx->queue_depth = reg->queue_depth;
-		hctx->reserved_tags = reg->reserved_tags;
 		hctx->cmd_size = reg->cmd_size;
-		atomic_set(&hctx->pending_flush, 0);
 
 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 						blk_mq_hctx_notify, hctx);
@@ -1412,9 +1388,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-	if (blk_mq_init_hw_queues(q, reg, driver_data))
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+				cache_line_size()), GFP_KERNEL);
+	if (!q->flush_rq)
 		goto err_hw;
 
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_flush_rq;
+
 	blk_mq_map_swqueue(q);
 
 	mutex_lock(&all_q_mutex);
@@ -1422,6 +1403,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	mutex_unlock(&all_q_mutex);
 
 	return q;
+
+err_flush_rq:
+	kfree(q->flush_rq);
 err_hw:
 	kfree(q->mq_map);
 err_map:
......
@@ -28,6 +28,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
 
 /*
  * CPU hotplug helpers
......
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
+	kfree(q->flush_rq);
+
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
......
@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
......
@@ -101,7 +101,7 @@ struct request {
 	};
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_data;
+		struct work_struct mq_flush_work;
 	};
 
 	struct request_queue *q;
@@ -451,13 +451,8 @@ struct request_queue {
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-	union {
-		struct request	flush_rq;
-		struct {
-			spinlock_t mq_flush_lock;
-			struct work_struct mq_flush_work;
-		};
-	};
+	struct request		*flush_rq;
+	spinlock_t		mq_flush_lock;
 
 	struct mutex		sysfs_lock;
......