Commit fae7fae4 authored by Matias Bjørling, committed by Jens Axboe

lightnvm: make geometry structures 2.0 ready

Prepare for the 2.0 revision by adapting the geometry
structures to coexist with the 1.2 revision.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bb27aa9e
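
Reader's note before the hunks: the patch is mostly a rename-and-regroup of the geometry fields so that the generic members carry revision-neutral names and the 1.2-era values become derived, legacy members. As a summary only (this sketch is ours, not part of the patch), the generic half of the reworked struct nvm_geo looks roughly like this, with the 1.2 name each field replaces noted in comments:

/* Reader's sketch -- summarizes the renames visible in the diff below */
struct nvm_geo_sketch {
    int nr_chnls;                           /* unchanged                                        */
    int all_luns;                           /* was nr_luns: total LUNs across all channels      */
    int nr_luns;                            /* was luns_per_chnl: LUNs per channel, -1 if unbalanced */
    int nr_chks;                            /* was blks_per_lun: chunks (1.2 blocks) per LUN    */
    int sec_per_chk;                        /* was sec_per_blk: sectors per chunk               */
    int ws_min, ws_opt, ws_seq, ws_per_chk; /* new 2.0-style write-unit hints                   */
};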
@@ -98,7 +98,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
     if (clear) {
         for (j = 0; j < ch_map->nr_luns; j++) {
             int lun = j + lun_offs[j];
-            int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+            int lunid = (ch * dev->geo.nr_luns) + lun;
             WARN_ON(!test_and_clear_bit(lunid,
                         dev->lun_map));
@@ -124,10 +124,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
     struct ppa_addr *luns;
     int nr_luns = lun_end - lun_begin + 1;
     int luns_left = nr_luns;
-    int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
-    int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
-    int bch = lun_begin / dev->geo.luns_per_chnl;
-    int blun = lun_begin % dev->geo.luns_per_chnl;
+    int nr_chnls = nr_luns / dev->geo.nr_luns;
+    int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+    int bch = lun_begin / dev->geo.nr_luns;
+    int blun = lun_begin % dev->geo.nr_luns;
     int lunid = 0;
     int lun_balanced = 1;
     int prev_nr_luns;
@@ -148,15 +148,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
     if (!luns)
         goto err_luns;
-    prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
-                dev->geo.luns_per_chnl : luns_left;
+    prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+                dev->geo.nr_luns : luns_left;
     for (i = 0; i < nr_chnls; i++) {
         struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
         int *lun_roffs = ch_rmap->lun_offs;
         struct nvm_ch_map *ch_map = &dev_map->chnls[i];
         int *lun_offs;
-        int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
-                    dev->geo.luns_per_chnl : luns_left;
+        int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+                    dev->geo.nr_luns : luns_left;
         if (lun_balanced && prev_nr_luns != luns_in_chnl)
             lun_balanced = 0;
@@ -193,8 +193,8 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
     memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
     /* Target device only owns a portion of the physical device */
     tgt_dev->geo.nr_chnls = nr_chnls;
-    tgt_dev->geo.nr_luns = nr_luns;
-    tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+    tgt_dev->geo.all_luns = nr_luns;
+    tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
     tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
     tgt_dev->q = dev->q;
     tgt_dev->map = dev_map;
@@ -414,7 +414,7 @@ static int nvm_register_map(struct nvm_dev *dev)
     for (i = 0; i < dev->geo.nr_chnls; i++) {
         struct nvm_ch_map *ch_rmap;
         int *lun_roffs;
-        int luns_in_chnl = dev->geo.luns_per_chnl;
+        int luns_in_chnl = dev->geo.nr_luns;
         ch_rmap = &rmap->chnls[i];
@@ -717,10 +717,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
     struct nvm_geo *geo = &dev->geo;
     int blk, offset, pl, blktype;
-    if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+    if (nr_blks != geo->nr_chks * geo->plane_mode)
         return -EINVAL;
-    for (blk = 0; blk < geo->blks_per_lun; blk++) {
+    for (blk = 0; blk < geo->nr_chks; blk++) {
         offset = blk * geo->plane_mode;
         blktype = blks[offset];
@@ -736,7 +736,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
         blks[blk] = blktype;
     }
-    return geo->blks_per_lun;
+    return geo->nr_chks;
 }
 EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -758,43 +758,40 @@ static int nvm_core_init(struct nvm_dev *dev)
     struct nvm_geo *geo = &dev->geo;
     int ret;
-    memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-    if (grp->mtype != 0) {
-        pr_err("nvm: memory type not supported\n");
-        return -EINVAL;
-    }
     /* Whole device values */
     geo->nr_chnls = grp->num_ch;
-    geo->luns_per_chnl = grp->num_lun;
-    /* Generic device values */
-    geo->pgs_per_blk = grp->num_pg;
-    geo->blks_per_lun = grp->num_blk;
-    geo->nr_planes = grp->num_pln;
-    geo->fpg_size = grp->fpg_sz;
-    geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+    geo->nr_luns = grp->num_lun;
+    /* Generic device geometry values */
+    geo->ws_min = grp->ws_min;
+    geo->ws_opt = grp->ws_opt;
+    geo->ws_seq = grp->ws_seq;
+    geo->ws_per_chk = grp->ws_per_chk;
+    geo->nr_chks = grp->num_chk;
     geo->sec_size = grp->csecs;
     geo->oob_size = grp->sos;
-    geo->sec_per_pg = grp->fpg_sz / grp->csecs;
     geo->mccap = grp->mccap;
+    memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-    geo->plane_mode = NVM_PLANE_SINGLE;
     geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
-    if (grp->mpos & 0x020202)
-        geo->plane_mode = NVM_PLANE_DOUBLE;
-    if (grp->mpos & 0x040404)
-        geo->plane_mode = NVM_PLANE_QUAD;
+    geo->sec_per_chk = grp->clba;
+    geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+    geo->all_luns = geo->nr_luns * geo->nr_chnls;
+    if (grp->mtype != 0) {
+        pr_err("nvm: memory type not supported\n");
+        return -EINVAL;
+    }
-    /* calculated values */
+    /* 1.2 spec device geometry values */
+    geo->plane_mode = 1 << geo->ws_seq;
+    geo->nr_planes = geo->ws_opt / geo->ws_min;
+    geo->sec_per_pg = geo->ws_min;
     geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
-    geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
-    geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
-    geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
-    dev->total_secs = geo->nr_luns * geo->sec_per_lun;
-    dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+    dev->total_secs = geo->all_luns * geo->sec_per_lun;
+    dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
                     sizeof(unsigned long), GFP_KERNEL);
     if (!dev->lun_map)
         return -ENOMEM;
@@ -854,8 +851,8 @@ static int nvm_init(struct nvm_dev *dev)
     pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
             dev->name, geo->sec_per_pg, geo->nr_planes,
-            geo->pgs_per_blk, geo->blks_per_lun,
-            geo->nr_luns, geo->nr_chnls);
+            geo->ws_per_chk, geo->nr_chks,
+            geo->all_luns, geo->nr_chnls);
     return 0;
 err:
     pr_err("nvm: failed to initialize nvm\n");
@@ -946,12 +943,12 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
     if (s->lun_begin == -1 && s->lun_end == -1) {
         s->lun_begin = 0;
-        s->lun_end = dev->geo.nr_luns - 1;
+        s->lun_end = dev->geo.all_luns - 1;
     }
-    if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
+    if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.all_luns) {
         pr_err("nvm: lun out of bound (%u:%u > %u)\n",
-            s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
+            s->lun_begin, s->lun_end, dev->geo.all_luns - 1);
         return -EINVAL;
     }
...
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
     /* Start metadata */
     smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
-    smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+    smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
     /* Fill metadata among lines */
     if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                 lm->sec_per_line);
         bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                 lm->sec_per_line);
-        line->sec_in_line -= geo->sec_per_blk;
+        line->sec_in_line -= geo->sec_per_chk;
         if (bit >= lm->emeta_bb)
             nr_bb++;
     }
@@ -1746,7 +1746,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
     struct nvm_tgt_dev *dev = pblk->dev;
     struct nvm_geo *geo = &dev->geo;
     struct pblk_lun *rlun;
-    int nr_luns = geo->nr_luns;
+    int nr_luns = geo->all_luns;
     int bit = -1;
     while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
...
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
     }
     ppaf.ch_len = power_len;
-    power_len = get_count_order(geo->luns_per_chnl);
-    if (1 << power_len != geo->luns_per_chnl) {
+    power_len = get_count_order(geo->nr_luns);
+    if (1 << power_len != geo->nr_luns) {
         pr_err("pblk: supports only power-of-two LUN config.\n");
         return -EINVAL;
     }
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
     struct nvm_geo *geo = &dev->geo;
     pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
-                geo->nr_planes * geo->nr_luns;
+                geo->nr_planes * geo->all_luns;
     if (pblk_init_global_caches(pblk))
         return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
     if (!pblk->gen_ws_pool)
         goto free_page_bio_pool;
-    pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+    pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+                            pblk_rec_cache);
     if (!pblk->rec_pool)
         goto free_gen_ws_pool;
-    pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+    pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
                             pblk_g_rq_cache);
     if (!pblk->r_rq_pool)
         goto free_rec_pool;
-    pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+    pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
                             pblk_g_rq_cache);
     if (!pblk->e_rq_pool)
         goto free_r_rq_pool;
-    pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+    pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
                             pblk_w_rq_cache);
     if (!pblk->w_rq_pool)
         goto free_e_rq_pool;
@@ -409,7 +410,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
     u8 *blks;
     int nr_blks, ret;
-    nr_blks = geo->blks_per_lun * geo->plane_mode;
+    nr_blks = geo->nr_chks * geo->plane_mode;
     blks = kmalloc(nr_blks, GFP_KERNEL);
     if (!blks)
         return -ENOMEM;
@@ -482,20 +483,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
     int i, ret;
     /* TODO: Implement unbalanced LUN support */
-    if (geo->luns_per_chnl < 0) {
+    if (geo->nr_luns < 0) {
         pr_err("pblk: unbalanced LUN config.\n");
         return -EINVAL;
     }
-    pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+    pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+                            GFP_KERNEL);
     if (!pblk->luns)
         return -ENOMEM;
-    for (i = 0; i < geo->nr_luns; i++) {
+    for (i = 0; i < geo->all_luns; i++) {
         /* Stripe across channels */
         int ch = i % geo->nr_chnls;
         int lun_raw = i / geo->nr_chnls;
-        int lunid = lun_raw + ch * geo->luns_per_chnl;
+        int lunid = lun_raw + ch * geo->nr_luns;
         rlun = &pblk->luns[i];
         rlun->bppa = luns[lunid];
@@ -590,8 +592,8 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
      * on user capacity consider only provisioned blocks
      */
     pblk->rl.total_blocks = nr_free_blks;
-    pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
-    pblk->capacity = provisioned * geo->sec_per_blk;
+    pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+    pblk->capacity = provisioned * geo->sec_per_chk;
     atomic_set(&pblk->rl.free_blocks, nr_free_blks);
 }
@@ -683,7 +685,7 @@ static int pblk_lines_init(struct pblk *pblk)
     int i, ret;
     pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
-    max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+    max_write_ppas = pblk->min_write_pgs * geo->all_luns;
     pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
                 max_write_ppas : nvm_max_phys_sects(dev);
     pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +695,26 @@ static int pblk_lines_init(struct pblk *pblk)
         return -EINVAL;
     }
-    div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+    div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
     if (mod) {
         pr_err("pblk: bad configuration of sectors/pages\n");
         return -EINVAL;
     }
-    l_mg->nr_lines = geo->blks_per_lun;
+    l_mg->nr_lines = geo->nr_chks;
     l_mg->log_line = l_mg->data_line = NULL;
     l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
     l_mg->nr_free_lines = 0;
     bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-    lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
-    lm->blk_per_line = geo->nr_luns;
-    lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+    lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+    lm->blk_per_line = geo->all_luns;
+    lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
     lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
-    lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+    lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
     lm->mid_thrs = lm->sec_per_line / 2;
     lm->high_thrs = lm->sec_per_line / 4;
-    lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+    lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
     /* Calculate necessary pages for smeta. See comment over struct
      * line_smeta definition
@@ -742,12 +744,12 @@ static int pblk_lines_init(struct pblk *pblk)
         goto add_emeta_page;
     }
-    lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+    lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
     lm->min_blk_line = 1;
-    if (geo->nr_luns > 1)
+    if (geo->all_luns > 1)
         lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
-                    lm->emeta_sec[0], geo->sec_per_blk);
+                    lm->emeta_sec[0], geo->sec_per_chk);
     if (lm->min_blk_line > lm->blk_per_line) {
         pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +774,7 @@ static int pblk_lines_init(struct pblk *pblk)
         goto fail_free_bb_template;
     }
-    bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+    bb_distance = (geo->all_luns) * geo->sec_per_pl;
     for (i = 0; i < lm->sec_per_line; i += bb_distance)
         bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +846,7 @@ static int pblk_lines_init(struct pblk *pblk)
     pblk_set_provision(pblk, nr_free_blks);
     /* Cleanup per-LUN bad block lists - managed within lines on run-time */
-    for (i = 0; i < geo->nr_luns; i++)
+    for (i = 0; i < geo->all_luns; i++)
         kfree(pblk->luns[i].bb_list);
     return 0;
@@ -858,7 +860,7 @@ static int pblk_lines_init(struct pblk *pblk)
 fail_free_meta:
     pblk_line_meta_free(pblk);
 fail:
-    for (i = 0; i < geo->nr_luns; i++)
+    for (i = 0; i < geo->all_luns; i++)
         kfree(pblk->luns[i].bb_list);
     return ret;
@@ -1041,13 +1043,13 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
     blk_queue_write_cache(tqueue, true, false);
-    tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+    tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
     tqueue->limits.discard_alignment = 0;
     blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
     queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
     pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
-            geo->nr_luns, pblk->l_mg.nr_lines,
+            geo->all_luns, pblk->l_mg.nr_lines,
             (unsigned long long)pblk->rl.nr_secs,
             pblk->rwb.nr_entries);
...
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
     int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
     return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
-                nr_bb * geo->sec_per_blk;
+                nr_bb * geo->sec_per_chk;
 }
 struct pblk_recov_alloc {
...
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
     ssize_t sz = 0;
     int i;
-    for (i = 0; i < geo->nr_luns; i++) {
+    for (i = 0; i < geo->all_luns; i++) {
         int active = 1;
         rlun = &pblk->luns[i];
@@ -238,7 +238,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
     sz = snprintf(page, PAGE_SIZE - sz,
         "line: nluns:%d, nblks:%d, nsecs:%d\n",
-        geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+        geo->all_luns, lm->blk_per_line, lm->sec_per_line);
     sz += snprintf(page + sz, PAGE_SIZE - sz,
         "lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +287,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
         "blk_line:%d, sec_line:%d, sec_blk:%d\n",
         lm->blk_per_line,
         lm->sec_per_line,
-        geo->sec_per_blk);
+        geo->sec_per_chk);
     return sz;
 }
...
@@ -907,7 +907,7 @@ static inline int pblk_pad_distance(struct pblk *pblk)
     struct nvm_tgt_dev *dev = pblk->dev;
     struct nvm_geo *geo = &dev->geo;
-    return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+    return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
 }
 static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
@@ -1212,10 +1212,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
         if (!ppa->c.is_cached &&
                 ppa->g.ch < geo->nr_chnls &&
-                ppa->g.lun < geo->luns_per_chnl &&
+                ppa->g.lun < geo->nr_luns &&
                 ppa->g.pl < geo->nr_planes &&
-                ppa->g.blk < geo->blks_per_lun &&
-                ppa->g.pg < geo->pgs_per_blk &&
+                ppa->g.blk < geo->nr_chks &&
+                ppa->g.pg < geo->ws_per_chk &&
                 ppa->g.sec < geo->sec_per_pg)
             continue;
...
@@ -135,7 +135,7 @@ struct nvme_nvm_id_group {
     __u8 num_lun;
     __u8 num_pln;
     __u8 rsvd1;
-    __le16 num_blk;
+    __le16 num_chk;
     __le16 num_pg;
     __le16 fpg_sz;
     __le16 csecs;
@@ -215,36 +215,57 @@ static inline void _nvme_nvm_check_size(void)
 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 {
     struct nvme_nvm_id_group *src;
-    struct nvm_id_group *dst;
+    struct nvm_id_group *grp;
+    int sec_per_pg, sec_per_pl, pg_per_blk;
     if (nvme_nvm_id->cgrps != 1)
         return -EINVAL;
     src = &nvme_nvm_id->groups[0];
-    dst = &nvm_id->grp;
-    dst->mtype = src->mtype;
-    dst->fmtype = src->fmtype;
-    dst->num_ch = src->num_ch;
-    dst->num_lun = src->num_lun;
-    dst->num_pln = src->num_pln;
-    dst->num_pg = le16_to_cpu(src->num_pg);
-    dst->num_blk = le16_to_cpu(src->num_blk);
-    dst->fpg_sz = le16_to_cpu(src->fpg_sz);
-    dst->csecs = le16_to_cpu(src->csecs);
-    dst->sos = le16_to_cpu(src->sos);
-    dst->trdt = le32_to_cpu(src->trdt);
-    dst->trdm = le32_to_cpu(src->trdm);
-    dst->tprt = le32_to_cpu(src->tprt);
-    dst->tprm = le32_to_cpu(src->tprm);
-    dst->tbet = le32_to_cpu(src->tbet);
-    dst->tbem = le32_to_cpu(src->tbem);
-    dst->mpos = le32_to_cpu(src->mpos);
-    dst->mccap = le32_to_cpu(src->mccap);
-    dst->cpar = le16_to_cpu(src->cpar);
+    grp = &nvm_id->grp;
+    grp->mtype = src->mtype;
+    grp->fmtype = src->fmtype;
+    grp->num_ch = src->num_ch;
+    grp->num_lun = src->num_lun;
+    grp->num_chk = le16_to_cpu(src->num_chk);
+    grp->csecs = le16_to_cpu(src->csecs);
+    grp->sos = le16_to_cpu(src->sos);
+    pg_per_blk = le16_to_cpu(src->num_pg);
+    sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+    sec_per_pl = sec_per_pg * src->num_pln;
+    grp->clba = sec_per_pl * pg_per_blk;
+    grp->ws_per_chk = pg_per_blk;
+    grp->mpos = le32_to_cpu(src->mpos);
+    grp->cpar = le16_to_cpu(src->cpar);
+    grp->mccap = le32_to_cpu(src->mccap);
+    grp->ws_opt = grp->ws_min = sec_per_pg;
+    grp->ws_seq = NVM_IO_SNGL_ACCESS;
+    if (grp->mpos & 0x020202) {
+        grp->ws_seq = NVM_IO_DUAL_ACCESS;
+        grp->ws_opt <<= 1;
+    } else if (grp->mpos & 0x040404) {
+        grp->ws_seq = NVM_IO_QUAD_ACCESS;
+        grp->ws_opt <<= 2;
+    }
+    grp->trdt = le32_to_cpu(src->trdt);
+    grp->trdm = le32_to_cpu(src->trdm);
+    grp->tprt = le32_to_cpu(src->tprt);
+    grp->tprm = le32_to_cpu(src->tprm);
+    grp->tbet = le32_to_cpu(src->tbet);
+    grp->tbem = le32_to_cpu(src->tbem);
+    /* 1.2 compatibility */
+    grp->num_pln = src->num_pln;
+    grp->num_pg = le16_to_cpu(src->num_pg);
+    grp->fpg_sz = le16_to_cpu(src->fpg_sz);
     return 0;
 }
@@ -293,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
     struct nvme_ctrl *ctrl = ns->ctrl;
     struct nvme_nvm_command c = {};
     struct nvme_nvm_bb_tbl *bb_tbl;
-    int nr_blks = geo->blks_per_lun * geo->plane_mode;
+    int nr_blks = geo->nr_chks * geo->plane_mode;
     int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
     int ret = 0;
@@ -334,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
         goto out;
     }
-    memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+    memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
 out:
     kfree(bb_tbl);
     return ret;
@@ -773,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
     } else if (strcmp(attr->name, "num_planes") == 0) {
         return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
     } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
-        return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+        return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
     } else if (strcmp(attr->name, "num_pages") == 0) {
         return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
     } else if (strcmp(attr->name, "page_size") == 0) {
...
@@ -159,12 +159,16 @@ struct nvm_id_group {
     u8 fmtype;
     u8 num_ch;
     u8 num_lun;
-    u8 num_pln;
-    u16 num_blk;
-    u16 num_pg;
-    u16 fpg_sz;
+    u16 num_chk;
+    u16 clba;
     u16 csecs;
     u16 sos;
+    u16 ws_min;
+    u16 ws_opt;
+    u16 ws_seq;
+    u16 ws_per_chk;
     u32 trdt;
     u32 trdm;
     u32 tprt;
@@ -174,6 +178,11 @@ struct nvm_id_group {
     u32 mpos;
     u32 mccap;
     u16 cpar;
+    /* 1.2 compatibility */
+    u8 num_pln;
+    u16 num_pg;
+    u16 fpg_sz;
 };
 struct nvm_addr_format {
@@ -259,31 +268,36 @@ enum {
     NVM_BLK_ST_BAD = 0x8, /* Bad block */
 };
 /* Device generic information */
 struct nvm_geo {
+    /* generic geometry */
     int nr_chnls;
-    int nr_luns;
-    int luns_per_chnl; /* -1 if channels are not symmetric */
-    int nr_planes;
-    int sec_per_pg; /* only sectors for a single page */
-    int pgs_per_blk;
-    int blks_per_lun;
-    int fpg_size;
-    int pfpg_size; /* size of buffer if all pages are to be read */
+    int all_luns; /* across channels */
+    int nr_luns; /* per channel */
+    int nr_chks; /* per lun */
     int sec_size;
     int oob_size;
     int mccap;
-    struct nvm_addr_format ppaf;
-    /* Calculated/Cached values. These do not reflect the actual usable
-     * blocks at run-time.
-     */
+    int sec_per_chk;
+    int sec_per_lun;
+    int ws_min;
+    int ws_opt;
+    int ws_seq;
+    int ws_per_chk;
     int max_rq_size;
-    int plane_mode; /* drive device in single, double or quad mode */
+    struct nvm_addr_format ppaf;
+    /* Legacy 1.2 specific geometry */
+    int plane_mode; /* drive device in single, double or quad mode */
+    int nr_planes;
+    int sec_per_pg; /* only sectors for a single page */
     int sec_per_pl; /* all sectors across planes */
-    int sec_per_blk;
-    int sec_per_lun;
 };
 /* sub-device structure */
...
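
A quick way to sanity-check the new split is to trace the arithmetic that init_grps() and nvm_core_init() now perform: the 1.2 identify fields are folded into generic write-unit values, and the legacy plane/page view is rebuilt from them. The following standalone sketch is our own illustration with made-up identify numbers, not code from the patch:

#include <stdio.h>

int main(void)
{
    /* Hypothetical 1.2 identify data: 4 KiB sectors, 16 KiB flash pages,
     * 4 planes, 512 pages per block. */
    int csecs = 4096, fpg_sz = 16384, num_pln = 4, num_pg = 512;

    /* init_grps(): fold 1.2 fields into generic write-unit values */
    int sec_per_pg = fpg_sz / csecs;        /* 4                          */
    int sec_per_pl = sec_per_pg * num_pln;  /* 16                         */
    int clba = sec_per_pl * num_pg;         /* 8192 sectors per chunk     */
    int ws_min = sec_per_pg;                /* 4                          */
    int ws_opt = sec_per_pg << 2;           /* quad-plane device: 16      */
    int ws_seq = 2;                         /* quad access hint           */

    /* nvm_core_init(): rebuild the legacy 1.2 view from the generic one */
    int plane_mode = 1 << ws_seq;           /* 4 (quad)                   */
    int nr_planes = ws_opt / ws_min;        /* 4                          */

    printf("clba=%d ws_min=%d ws_opt=%d plane_mode=%d nr_planes=%d\n",
           clba, ws_min, ws_opt, plane_mode, nr_planes);
    return 0;
}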