Commit 17912c49 authored by Javier González, committed by Jens Axboe

lightnvm: submit erases using the I/O path

Until now erases have been submitted as synchronous commands through a
dedicated erase function. In order to enable targets implementing
asynchronous erases, refactor the erase path so that it uses the normal
async I/O submission functions. If a target requires sync I/O, it can
implement it internally. Also, adapt rrpc to use the new erase path.
Signed-off-by: Javier González <javier@cnexlabs.com>
Fixed spelling error.
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 2849a7be
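With this change an erase becomes an ordinary nvm_rq submitted through nvm_submit_io(). For a target that wants a genuinely asynchronous erase, usage reduces to the sketch below, which mirrors the new nvm_erase_sync() from the diff but completes via callback instead of blocking. my_erase_async(), my_erase_end_io() and the heap-allocated request are illustrative assumptions, not part of the patch.

/* Minimal sketch of an asynchronous erase through the new path, modelled
 * on nvm_erase_sync() in the diff below. Helper names are hypothetical.
 */
static void my_erase_end_io(struct nvm_rq *rqd)
{
	/* Completion context: rqd->error carries the device status. */
	pr_debug("nvm: async erase done, error=%d\n", rqd->error);

	/* Assumes nvm_submit_io() set rqd->dev before dispatch, as core.c
	 * does at this point in the tree.
	 */
	nvm_free_rqd_ppalist(rqd->dev, rqd);
	kfree(rqd);
}

static int my_erase_async(struct nvm_tgt_dev *tgt_dev,
			  struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq *rqd;
	int ret;

	rqd = kzalloc(sizeof(*rqd), GFP_KERNEL);
	if (!rqd)
		return -ENOMEM;

	rqd->opcode = NVM_OP_ERASE;
	rqd->end_io = my_erase_end_io;
	rqd->flags = tgt_dev->geo.plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, rqd, ppas, nr_ppas, 1);
	if (ret)
		goto err_free;

	/* Returns as soon as the erase is queued; my_erase_end_io() runs
	 * later from the completion path.
	 */
	ret = nvm_submit_io(tgt_dev, rqd);
	if (ret)
		goto err_free_ppa;

	return 0;

err_free_ppa:
	nvm_free_rqd_ppalist(tgt_dev, rqd);
err_free:
	kfree(rqd);
	return ret;
}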
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -590,11 +590,11 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
 	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
 
 	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(dev, &rqd);
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
 	if (ret) {
 		pr_err("nvm: failed bb mark\n");
 		return -EINVAL;
@@ -626,34 +626,45 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
+static void nvm_end_io_sync(struct nvm_rq *rqd)
 {
-	struct nvm_dev *dev = tgt_dev->parent;
-	struct nvm_rq rqd;
-	int ret;
+	struct completion *waiting = rqd->private;
 
-	if (!dev->ops->erase_block)
-		return 0;
+	complete(waiting);
+}
 
-	nvm_map_to_dev(tgt_dev, ppas);
+int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+								int nr_ppas)
+{
+	struct nvm_geo *geo = &tgt_dev->geo;
+	struct nvm_rq rqd;
+	int ret;
+	DECLARE_COMPLETION_ONSTACK(wait);
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
+	rqd.opcode = NVM_OP_ERASE;
+	rqd.end_io = nvm_end_io_sync;
+	rqd.private = &wait;
+	rqd.flags = geo->plane_mode >> 1;
+
+	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
 	if (ret)
 		return ret;
 
-	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-	rqd.flags = flags;
-
-	ret = dev->ops->erase_block(dev, &rqd);
+	ret = nvm_submit_io(tgt_dev, &rqd);
+	if (ret) {
+		pr_err("rrpr: erase I/O submission failed: %d\n", ret);
+		goto free_ppa_list;
+	}
+	wait_for_completion_io(&wait);
 
-	nvm_free_rqd_ppalist(dev, &rqd);
+free_ppa_list:
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
 
 	return ret;
 }
-EXPORT_SYMBOL(nvm_erase_blk);
+EXPORT_SYMBOL(nvm_erase_sync);
 
 int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
 			nvm_l2p_update_fn *update_l2p, void *priv)
@@ -732,10 +743,11 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
 }
 EXPORT_SYMBOL(nvm_put_area);
 
-int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
+int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
 			const struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
-	struct nvm_geo *geo = &dev->geo;
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_geo *geo = &tgt_dev->geo;
 	int i, plane_cnt, pl_idx;
 	struct ppa_addr ppa;
 
@@ -773,12 +785,12 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 }
 EXPORT_SYMBOL(nvm_set_rqd_ppalist);
 
-void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
+void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
 	if (!rqd->ppa_list)
 		return;
 
-	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
+	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 }
 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -414,7 +414,6 @@ static void rrpc_block_gc(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct rrpc_lun *rlun = rblk->rlun;
-	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct ppa_addr ppa;
 
 	mempool_free(gcb, rrpc->gcb_pool);
@@ -430,7 +429,7 @@ static void rrpc_block_gc(struct work_struct *work)
 	ppa.g.lun = rlun->bppa.g.lun;
 	ppa.g.blk = rblk->id;
 
-	if (nvm_erase_blk(dev, &ppa, 0))
+	if (nvm_erase_sync(rrpc->dev, &ppa, 1))
 		goto put_back;
 
 	rrpc_put_blk(rrpc, rblk);
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -510,12 +510,16 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	}
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
-	rq->ioprio = bio_prio(bio);
-	if (bio_has_data(bio))
-		rq->nr_phys_segments = bio_phys_segments(q, bio);
-
-	rq->__data_len = bio->bi_iter.bi_size;
-	rq->bio = rq->biotail = bio;
+	if (bio) {
+		rq->ioprio = bio_prio(bio);
+		rq->__data_len = bio->bi_iter.bi_size;
+		rq->bio = rq->biotail = bio;
+		if (bio_has_data(bio))
+			rq->nr_phys_segments = bio_phys_segments(q, bio);
+	} else {
+		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+		rq->__data_len = 0;
+	}
 
 	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
 
@@ -526,21 +530,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	return 0;
 }
 
-static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	struct request_queue *q = dev->q;
-	struct nvme_ns *ns = q->queuedata;
-	struct nvme_nvm_command c = {};
-
-	c.erase.opcode = NVM_OP_ERASE;
-	c.erase.nsid = cpu_to_le32(ns->ns_id);
-	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
-	c.erase.control = cpu_to_le16(rqd->flags);
-
-	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
-}
-
 static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -576,7 +565,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.set_bb_tbl		= nvme_nvm_set_bb_tbl,
 
 	.submit_io		= nvme_nvm_submit_io,
-	.erase_block		= nvme_nvm_erase_block,
 
 	.create_dma_pool	= nvme_nvm_create_dma_pool,
 	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -56,7 +56,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
 typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
 typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
 typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +69,6 @@ struct nvm_dev_ops {
 	nvm_op_set_bb_fn	*set_bb_tbl;
 
 	nvm_submit_io_fn	*submit_io;
-	nvm_erase_blk_fn	*erase_block;
 
 	nvm_create_dma_pool_fn	*create_dma_pool;
 	nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -479,10 +477,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
 			      int, int);
 extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
 extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
+extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
 			       const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
 extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
 			   void *);
 extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
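Stripped of the lightnvm specifics, nvm_erase_sync() above is an instance of the standard kernel idiom for layering a synchronous call over an asynchronous submission path: declare an on-stack completion, point the request's end_io callback at a helper that calls complete(), submit, then block in wait_for_completion_io(). A minimal generic sketch of that idiom follows; struct my_req, my_do_sync() and the submit callback are placeholder names, not part of the patch.

#include <linux/completion.h>

/* Generic sync-over-async idiom, as used by nvm_erase_sync() above. */
struct my_req {
	void (*end_io)(struct my_req *req);
	void *private;
};

static void my_end_io_sync(struct my_req *req)
{
	/* Runs from completion context; wakes the submitting task. */
	complete((struct completion *)req->private);
}

static int my_do_sync(struct my_req *req,
		      int (*submit_async)(struct my_req *))
{
	DECLARE_COMPLETION_ONSTACK(wait);	/* valid only for this call */
	int ret;

	req->end_io = my_end_io_sync;
	req->private = &wait;

	ret = submit_async(req);		/* queues and returns */
	if (ret)
		return ret;

	/* Accounted as I/O wait; returns once end_io has run. */
	wait_for_completion_io(&wait);
	return 0;
}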