Commit 3e03f632 authored by Igor Konopko, committed by Jens Axboe

lightnvm: pblk: IO path reorganization

This patch prepares the read path for a new approach to partial read
handling, which is simpler than the previous one.

The most important change is moving the handling of completed and
failed bios out of pblk_make_rq() and into the particular read and
write functions. This is needed because, after the partial read path
changes, the completed/failed bio will sometimes differ from the
original one, so it can no longer be handled in pblk_make_rq().

The other changes are small refactors of the read path that reduce the
size of the follow-up patch containing the partial read changes.

Generally, the goal of this patch is not to change any functionality,
but to prepare the code for the changes that follow.
Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2e02457
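
For orientation, here is a condensed sketch, distilled from the diff
below, of how bio completion moves out of pblk_make_rq(); the elided
details (discard handling, queue splitting) are unchanged:

/* Before: pblk_make_rq() completed the original bio based on the
 * NVM_IO_* return code of the read/write helpers.
 */
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;
        int ret;

        ret = pblk_submit_read(pblk, bio);  /* or pblk_write_to_cache() */
        switch (ret) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }
        return BLK_QC_T_NONE;
}

/* After: the helpers return void and call bio_endio()/bio_io_error()
 * themselves, because once partial reads work on an internal clone the
 * bio that completes may no longer be the one pblk_make_rq() saw.
 */
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        pblk_submit_read(pblk, bio);        /* or pblk_write_to_cache() */
        return BLK_QC_T_NONE;
}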
@@ -18,7 +18,8 @@
 
 #include "pblk.h"
 
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+                         unsigned long flags)
 {
         struct request_queue *q = pblk->dev->q;
         struct pblk_w_ctx w_ctx;
@@ -43,6 +44,7 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
                 goto retry;
         case NVM_IO_ERR:
                 pblk_pipeline_stop(pblk);
+                bio_io_error(bio);
                 goto out;
         }
@@ -79,7 +81,9 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 out:
         generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
         pblk_write_should_kick(pblk);
-        return ret;
+
+        if (ret == NVM_IO_DONE)
+                bio_endio(bio);
 }
 
 /*
@@ -50,7 +50,6 @@ struct bio_set pblk_bio_set;
 static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 {
         struct pblk *pblk = q->queuedata;
-        int ret;
 
         if (bio_op(bio) == REQ_OP_DISCARD) {
                 pblk_discard(pblk, bio);
@@ -65,7 +64,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
          */
         if (bio_data_dir(bio) == READ) {
                 blk_queue_split(q, &bio);
-                ret = pblk_submit_read(pblk, bio);
+                pblk_submit_read(pblk, bio);
         } else {
                 /* Prevent deadlock in the case of a modest LUN configuration
                  * and large user I/Os. Unless stalled, the rate limiter
@@ -74,16 +73,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
                 if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                         blk_queue_split(q, &bio);
 
-                ret = pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-        }
-
-        switch (ret) {
-        case NVM_IO_ERR:
-                bio_io_error(bio);
-                break;
-        case NVM_IO_DONE:
-                bio_endio(bio);
-                break;
+                pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
         }
 
         return BLK_QC_T_NONE;
@@ -179,6 +179,7 @@ static void pblk_end_user_read(struct bio *bio, int error)
 {
         if (error && error != NVM_RSP_WARN_HIGHECC)
                 bio_io_error(bio);
-        bio_endio(bio);
+        else
+                bio_endio(bio);
 }
@@ -389,7 +390,6 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 
         /* Free allocated pages in new bio */
         pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
         __pblk_end_io_read(pblk, rqd, false);
-        return NVM_IO_ERR;
 }
@@ -434,7 +434,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
         }
 }
 
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
         struct request_queue *q = dev->q;
@@ -442,9 +442,9 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
         unsigned int nr_secs = pblk_get_secs(bio);
         struct pblk_g_ctx *r_ctx;
         struct nvm_rq *rqd;
+        struct bio *int_bio;
         unsigned int bio_init_idx;
         DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
-        int ret = NVM_IO_ERR;
 
         generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
                               &pblk->disk->part0);
@@ -455,74 +455,67 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
         rqd->opcode = NVM_OP_PREAD;
         rqd->nr_ppas = nr_secs;
-        rqd->bio = NULL; /* cloned bio if needed */
         rqd->private = pblk;
         rqd->end_io = pblk_end_io_read;
 
         r_ctx = nvm_rq_to_pdu(rqd);
         r_ctx->start_time = jiffies;
         r_ctx->lba = blba;
-        r_ctx->private = bio; /* original bio */
 
         /* Save the index for this bio's start. This is needed in case
          * we need to fill a partial read.
          */
         bio_init_idx = pblk_get_bi_idx(bio);
 
-        if (pblk_alloc_rqd_meta(pblk, rqd))
-                goto fail_rqd_free;
+        if (pblk_alloc_rqd_meta(pblk, rqd)) {
+                bio_io_error(bio);
+                pblk_free_rqd(pblk, rqd, PBLK_READ);
+                return;
+        }
+
+        /* Clone read bio to deal internally with:
+         * -read errors when reading from drive
+         * -bio_advance() calls during l2p lookup and cache reads
+         */
+        int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
 
         if (nr_secs > 1)
                 pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
         else
                 pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
 
+        r_ctx->private = bio; /* original bio */
+        rqd->bio = int_bio; /* internal bio */
+
         if (bitmap_full(read_bitmap, nr_secs)) {
+                pblk_end_user_read(bio, 0);
                 atomic_inc(&pblk->inflight_io);
                 __pblk_end_io_read(pblk, rqd, false);
-                return NVM_IO_DONE;
+                return;
         }
 
-        /* All sectors are to be read from the device */
-        if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
-                struct bio *int_bio = NULL;
-
-                /* Clone read bio to deal with read errors internally */
-                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-                if (!int_bio) {
-                        pblk_err(pblk, "could not clone read bio\n");
-                        goto fail_end_io;
-                }
-
-                rqd->bio = int_bio;
-
-                if (pblk_submit_io(pblk, rqd)) {
-                        pblk_err(pblk, "read IO submission failed\n");
-                        ret = NVM_IO_ERR;
-                        goto fail_end_io;
-                }
-
-                return NVM_IO_OK;
+        if (!bitmap_empty(read_bitmap, rqd->nr_ppas)) {
+                /* The read bio request could be partially filled by the write
+                 * buffer, but there are some holes that need to be read from
+                 * the drive.
+                 */
+                bio_put(int_bio);
+                rqd->bio = NULL;
+                if (pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
+                                          nr_secs)) {
+                        pblk_err(pblk, "read IO submission failed\n");
+                        bio_io_error(bio);
+                        __pblk_end_io_read(pblk, rqd, false);
+                }
+                return;
         }
 
-        /* The read bio request could be partially filled by the write buffer,
-         * but there are some holes that need to be read from the drive.
-         */
-        ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
-                                    nr_secs);
-        if (ret)
-                goto fail_meta_free;
-
-        return NVM_IO_OK;
-
-fail_meta_free:
-        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
-        pblk_free_rqd(pblk, rqd, PBLK_READ);
-        return ret;
-fail_end_io:
-        __pblk_end_io_read(pblk, rqd, false);
-        return ret;
+        /* All sectors are to be read from the device */
+        if (pblk_submit_io(pblk, rqd)) {
+                pblk_err(pblk, "read IO submission failed\n");
+                bio_io_error(bio);
+                __pblk_end_io_read(pblk, rqd, false);
+        }
 }
 
 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
@@ -867,7 +867,7 @@ void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 /*
  * pblk user I/O write path
  */
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
                         unsigned long flags);
 int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 
@@ -893,7 +893,7 @@ void pblk_write_kick(struct pblk *pblk);
  * pblk read path
  */
 extern struct bio_set pblk_bio_set;
-int pblk_submit_read(struct pblk *pblk, struct bio *bio);
+void pblk_submit_read(struct pblk *pblk, struct bio *bio);
 int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 
 /*
  * pblk recovery