Commit 47c122e3 authored by Jens Axboe

block: pre-allocate requests if plug is started and is a batch

The caller typically has a good (or even exact) idea of how many requests
it needs to submit. We can make the request/tag allocation a lot more
efficient if we just allocate N requests/tags upfront when we queue the
first bio from the batch.

Provide a new plug start helper that allows the caller to specify how many
IOs are expected. This sets plug->nr_ios, and we can use that for smarter
request allocation. The plug provides a holding spot for requests, and
request allocation will check it before calling into the normal request
allocation path.
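
For illustration, a caller that knows its batch size would use the new
helper roughly like this (a minimal sketch, not part of this patch;
nr_bios and bios[] are hypothetical, and the usual submit-path error
handling is omitted):

        struct blk_plug plug;
        int i;

        /* size the plug for the whole batch so tags can be allocated in one go */
        blk_start_plug_nr_ios(&plug, nr_bios);
        for (i = 0; i < nr_bios; i++)
                submit_bio(bios[i]);    /* allocation pulls from plug->cached_rq */
        blk_finish_plug(&plug);         /* flush the batch */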

When blk_finish_plug() is called, check if there are unused requests and
free them. This should not happen in normal operation. The exception is
if we get merging: then we may be left with requests that need freeing
when done.
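
The cleanup boils down to walking the plug's singly linked cached_rq list
and handing each leftover request back, as the new blk_mq_free_plug_rqs()
in the diff below does; conceptually (sketch only; the real function also
grabs a queue reference before each free):

        while (plug->cached_rq) {
                struct request *rq = plug->cached_rq;

                plug->cached_rq = rq->rq_next;
                blk_mq_free_request(rq);
        }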

This raises the per-core performance on my setup from ~5.8M to ~6.1M
IOPS.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ba0ffdd8

@@ -1632,6 +1632,31 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
 
+void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
+{
+        struct task_struct *tsk = current;
+
+        /*
+         * If this is a nested plug, don't actually assign it.
+         */
+        if (tsk->plug)
+                return;
+
+        INIT_LIST_HEAD(&plug->mq_list);
+        plug->cached_rq = NULL;
+        plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
+        plug->rq_count = 0;
+        plug->multiple_queues = false;
+        plug->nowait = false;
+        INIT_LIST_HEAD(&plug->cb_list);
+
+        /*
+         * Store ordering should not be needed here, since a potential
+         * preempt will imply a full memory barrier
+         */
+        tsk->plug = plug;
+}
+
 /**
  * blk_start_plug - initialize blk_plug and track it inside the task_struct
  * @plug:  The &struct blk_plug that needs to be initialized
@@ -1657,25 +1682,7 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
  */
 void blk_start_plug(struct blk_plug *plug)
 {
-        struct task_struct *tsk = current;
-
-        /*
-         * If this is a nested plug, don't actually assign it.
-         */
-        if (tsk->plug)
-                return;
-
-        INIT_LIST_HEAD(&plug->mq_list);
-        INIT_LIST_HEAD(&plug->cb_list);
-        plug->rq_count = 0;
-        plug->multiple_queues = false;
-        plug->nowait = false;
-
-        /*
-         * Store ordering should not be needed here, since a potential
-         * preempt will imply a full memory barrier
-         */
-        tsk->plug = plug;
+        blk_start_plug_nr_ios(plug, 1);
 }
 EXPORT_SYMBOL(blk_start_plug);
@@ -1727,6 +1734,8 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         if (!list_empty(&plug->mq_list))
                 blk_mq_flush_plug_list(plug, from_schedule);
+        if (unlikely(!from_schedule && plug->cached_rq))
+                blk_mq_free_plug_rqs(plug);
 }
 
 /**
...
@@ -359,6 +359,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
         struct request_queue *q = data->q;
         struct elevator_queue *e = q->elevator;
         u64 alloc_time_ns = 0;
+        struct request *rq;
         unsigned int tag;
 
         /* alloc_time includes depth and tag waits */
@@ -392,10 +393,21 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
         * case just retry the hctx assignment and tag allocation as CPU hotplug
         * should have migrated us to an online CPU by now.
         */
-        tag = blk_mq_get_tag(data);
-        if (tag == BLK_MQ_NO_TAG) {
+        do {
+                tag = blk_mq_get_tag(data);
+                if (tag != BLK_MQ_NO_TAG) {
+                        rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+                        if (!--data->nr_tags)
+                                return rq;
+                        if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+                                return rq;
+                        rq->rq_next = *data->cached_rq;
+                        *data->cached_rq = rq;
+                        data->flags |= BLK_MQ_REQ_NOWAIT;
+                        continue;
+                }
                 if (data->flags & BLK_MQ_REQ_NOWAIT)
-                        return NULL;
+                        break;
 
                 /*
                  * Give up the CPU and sleep for a random short time to ensure
@@ -404,8 +416,15 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
                  */
                 msleep(3);
                 goto retry;
+        } while (1);
+
+        if (data->cached_rq) {
+                rq = *data->cached_rq;
+                *data->cached_rq = rq->rq_next;
+                return rq;
         }
 
-        return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+        return NULL;
 }
@@ -415,6 +434,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                 .q              = q,
                 .flags          = flags,
                 .cmd_flags      = op,
+                .nr_tags        = 1,
         };
         struct request *rq;
         int ret;
@@ -443,6 +463,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                 .q              = q,
                 .flags          = flags,
                 .cmd_flags      = op,
+                .nr_tags        = 1,
         };
         u64 alloc_time_ns = 0;
         unsigned int cpu;
@@ -544,6 +565,18 @@ void blk_mq_free_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
+void blk_mq_free_plug_rqs(struct blk_plug *plug)
+{
+        while (plug->cached_rq) {
+                struct request *rq;
+
+                rq = plug->cached_rq;
+                plug->cached_rq = rq->rq_next;
+                percpu_ref_get(&rq->q->q_usage_counter);
+                blk_mq_free_request(rq);
+        }
+}
+
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
         u64 now = 0;
@@ -2185,6 +2218,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
         const int is_flush_fua = op_is_flush(bio->bi_opf);
         struct blk_mq_alloc_data data = {
                 .q              = q,
+                .nr_tags        = 1,
         };
         struct request *rq;
         struct blk_plug *plug;
@@ -2211,13 +2245,26 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
         hipri = bio->bi_opf & REQ_HIPRI;
 
-        data.cmd_flags = bio->bi_opf;
-        rq = __blk_mq_alloc_request(&data);
-        if (unlikely(!rq)) {
-                rq_qos_cleanup(q, bio);
-                if (bio->bi_opf & REQ_NOWAIT)
-                        bio_wouldblock_error(bio);
-                goto queue_exit;
+        plug = blk_mq_plug(q, bio);
+        if (plug && plug->cached_rq) {
+                rq = plug->cached_rq;
+                plug->cached_rq = rq->rq_next;
+                INIT_LIST_HEAD(&rq->queuelist);
+                data.hctx = rq->mq_hctx;
+        } else {
+                data.cmd_flags = bio->bi_opf;
+                if (plug) {
+                        data.nr_tags = plug->nr_ios;
+                        plug->nr_ios = 1;
+                        data.cached_rq = &plug->cached_rq;
+                }
+                rq = __blk_mq_alloc_request(&data);
+                if (unlikely(!rq)) {
+                        rq_qos_cleanup(q, bio);
+                        if (bio->bi_opf & REQ_NOWAIT)
+                                bio_wouldblock_error(bio);
+                        goto queue_exit;
+                }
         }
 
         trace_block_getrq(bio);
@@ -2236,7 +2283,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
                 return BLK_QC_T_NONE;
         }
 
-        plug = blk_mq_plug(q, bio);
         if (unlikely(is_flush_fua)) {
                 /* Bypass scheduler for flush requests */
                 blk_insert_flush(rq);
...
@@ -125,6 +125,7 @@ extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
+void blk_mq_free_plug_rqs(struct blk_plug *plug);
 
 void blk_mq_release(struct request_queue *q);
@@ -152,6 +153,10 @@ struct blk_mq_alloc_data {
         unsigned int shallow_depth;
         unsigned int cmd_flags;
 
+        /* allocate multiple requests/tags in one go */
+        unsigned int nr_tags;
+        struct request **cached_rq;
+
         /* input & output parameter */
         struct blk_mq_ctx *ctx;
         struct blk_mq_hw_ctx *hctx;
...
@@ -90,7 +90,10 @@ struct request {
         struct bio *bio;
         struct bio *biotail;
 
-        struct list_head queuelist;
+        union {
+                struct list_head queuelist;
+                struct request *rq_next;
+        };
 
         /*
          * The hash is used inside the scheduler, and killed once the
...
@@ -722,10 +722,17 @@ extern void blk_set_queue_dying(struct request_queue *);
  */
 struct blk_plug {
         struct list_head mq_list; /* blk-mq requests */
-        struct list_head cb_list; /* md requires an unplug callback */
+
+        /* if ios_left is > 1, we can batch tag/rq allocations */
+        struct request *cached_rq;
+        unsigned short nr_ios;
+
         unsigned short rq_count;
+
         bool multiple_queues;
         bool nowait;
+
+        struct list_head cb_list; /* md requires an unplug callback */
 };
 
 struct blk_plug_cb;
@@ -738,6 +745,7 @@ struct blk_plug_cb {
 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
                                              void *data, int size);
 extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
 extern void blk_finish_plug(struct blk_plug *);
 extern void blk_flush_plug_list(struct blk_plug *, bool);
 
@@ -772,6 +780,11 @@ long nr_blockdev_pages(void);
 struct blk_plug {
 };
 
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+                                         unsigned short nr_ios)
+{
+}
+
 static inline void blk_start_plug(struct blk_plug *plug)
 {
 }
...