Commit 41cd8b70 authored by Vishal Verma, committed by Dan Williams

libnvdimm, btt: add support for blk integrity

Support multiple block sizes (sector + metadata) using the blk integrity
framework. This registers a new integrity template that defines the
protection information tuple size based on the configured metadata size,
and simply acts as a passthrough for protection information generated by
another layer. The metadata is written to the storage as-is, and read back
with each sector.
Signed-off-by: Vishal Verma <vishal.l.verma@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent f68eb1e7
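
For orientation before the diff: the external LBA size now encodes a data sector plus its protection-information metadata, and the metadata size is simply the difference between the two (see btt_meta_size() in the patch). The standalone C sketch below is illustrative only and not part of the commit; it prints the sector/metadata split implied by each entry in the new btt_lbasize_supported table.

/*
 * Illustrative sketch only -- not part of this commit. Shows the
 * sector + metadata split for each newly supported external LBA size,
 * mirroring btt_meta_size(): metadata = lbasize - sector_size.
 */
#include <stdio.h>

int main(void)
{
	/* values taken from the new btt_lbasize_supported[] table */
	const unsigned long lbasizes[] = { 512, 520, 528, 4096, 4104, 4160, 4224 };
	size_t i;

	for (i = 0; i < sizeof(lbasizes) / sizeof(lbasizes[0]); i++) {
		unsigned long sector = lbasizes[i] < 4096 ? 512 : 4096;

		printf("lbasize %4lu = %4lu-byte sector + %3lu bytes metadata\n",
				lbasizes[i], sector, lbasizes[i] - sector);
	}
	return 0;
}
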
@@ -837,6 +837,11 @@ static int btt_meta_init(struct btt *btt)
 	return ret;
 }
 
+static u32 btt_meta_size(struct btt *btt)
+{
+	return btt->lbasize - btt->sector_size;
+}
+
 /*
  * This function calculates the arena in which the given LBA lies
  * by doing a linear walk. This is acceptable since we expect only
@@ -921,8 +926,63 @@ static void zero_fill_data(struct page *page, unsigned int off, u32 len)
 	kunmap_atomic(mem);
 }
 
-static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
-		sector_t sector, unsigned int len)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+			struct arena_info *arena, u32 postmap, int rw)
+{
+	unsigned int len = btt_meta_size(btt);
+	u64 meta_nsoff;
+	int ret = 0;
+
+	if (bip == NULL)
+		return 0;
+
+	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
+
+	while (len) {
+		unsigned int cur_len;
+		struct bio_vec bv;
+		void *mem;
+
+		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+		/*
+		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
+		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
+		 * can use those directly
+		 */
+		cur_len = min(len, bv.bv_len);
+		mem = kmap_atomic(bv.bv_page);
+		if (rw)
+			ret = arena_write_bytes(arena, meta_nsoff,
+					mem + bv.bv_offset, cur_len);
+		else
+			ret = arena_read_bytes(arena, meta_nsoff,
+					mem + bv.bv_offset, cur_len);
+
+		kunmap_atomic(mem);
+		if (ret)
+			return ret;
+
+		len -= cur_len;
+		meta_nsoff += cur_len;
+		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
+	}
+
+	return ret;
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+			struct arena_info *arena, u32 postmap, int rw)
+{
+	return 0;
+}
+#endif
+
+static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
+		struct page *page, unsigned int off, sector_t sector,
+		unsigned int len)
 {
 	int ret = 0;
 	int t_flag, e_flag;
@@ -984,6 +1044,12 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
 		if (ret)
 			goto out_rtt;
 
+		if (bip) {
+			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
+			if (ret)
+				goto out_rtt;
+		}
+
 		arena->rtt[lane] = RTT_INVALID;
 		nd_region_release_lane(btt->nd_region, lane);
@@ -1001,8 +1067,9 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
 	return ret;
 }
 
-static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
-		unsigned int off, unsigned int len)
+static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
+		sector_t sector, struct page *page, unsigned int off,
+		unsigned int len)
 {
 	int ret = 0;
 	struct arena_info *arena = NULL;
@@ -1036,12 +1103,19 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
 		if (new_postmap >= arena->internal_nlba) {
 			ret = -EIO;
 			goto out_lane;
-		} else
-			ret = btt_data_write(arena, new_postmap, page,
-						off, cur_len);
+		}
+
+		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
 		if (ret)
 			goto out_lane;
 
+		if (bip) {
+			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
+					WRITE);
+			if (ret)
+				goto out_lane;
+		}
+
 		lock_map(arena, premap);
 		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
 		if (ret)
@@ -1081,18 +1155,18 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
 	return ret;
 }
 
-static int btt_do_bvec(struct btt *btt, struct page *page,
-			unsigned int len, unsigned int off, int rw,
-			sector_t sector)
+static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
+			struct page *page, unsigned int len, unsigned int off,
+			int rw, sector_t sector)
 {
 	int ret;
 
 	if (rw == READ) {
-		ret = btt_read_pg(btt, page, off, sector, len);
+		ret = btt_read_pg(btt, bip, page, off, sector, len);
 		flush_dcache_page(page);
 	} else {
 		flush_dcache_page(page);
-		ret = btt_write_pg(btt, sector, page, off, len);
+		ret = btt_write_pg(btt, bip, sector, page, off, len);
 	}
 
 	return ret;
@@ -1100,11 +1174,23 @@ static int btt_do_bvec(struct btt *btt, struct page *page,
 
 static void btt_make_request(struct request_queue *q, struct bio *bio)
 {
+	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct btt *btt = q->queuedata;
 	struct bvec_iter iter;
 	struct bio_vec bvec;
 	int err = 0, rw;
 
+	/*
+	 * bio_integrity_enabled also checks if the bio already has an
+	 * integrity payload attached. If it does, we *don't* do a
+	 * bio_integrity_prep here - the payload has been generated by
+	 * another kernel subsystem, and we just pass it through.
+	 */
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		err = -EIO;
+		goto out;
+	}
+
 	rw = bio_data_dir(bio);
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
@@ -1115,7 +1201,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
 		BUG_ON(len < btt->sector_size);
 		BUG_ON(len % btt->sector_size);
 
-		err = btt_do_bvec(btt, bvec.bv_page, len, bvec.bv_offset,
+		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
 				rw, iter.bi_sector);
 		if (err) {
 			dev_info(&btt->nd_btt->dev,
@@ -1135,7 +1221,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, 0);
 	return 0;
 }
@@ -1188,15 +1274,26 @@ static int btt_blk_init(struct btt *btt)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 
-	set_capacity(btt->btt_disk,
-			btt->nlba * btt->sector_size >> SECTOR_SHIFT);
+	set_capacity(btt->btt_disk, 0);
 	add_disk(btt->btt_disk);
+	if (btt_meta_size(btt)) {
+		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+
+		if (rc) {
+			del_gendisk(btt->btt_disk);
+			put_disk(btt->btt_disk);
+			blk_cleanup_queue(btt->btt_queue);
+			return rc;
+		}
+	}
+	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
 
 	return 0;
 }
 
 static void btt_blk_cleanup(struct btt *btt)
 {
+	blk_integrity_unregister(btt->btt_disk);
 	del_gendisk(btt->btt_disk);
 	put_disk(btt->btt_disk);
 	blk_cleanup_queue(btt->btt_queue);
......
@@ -31,7 +31,7 @@
 #define ARENA_MAX_SIZE (1ULL << 39)	/* 512 GB */
 #define RTT_VALID (1UL << 31)
 #define RTT_INVALID 0
-#define INT_LBASIZE_ALIGNMENT 256
+#define INT_LBASIZE_ALIGNMENT 64
 #define BTT_PG_SIZE 4096
 #define BTT_DEFAULT_NFREE ND_MAX_LANES
 #define LOG_SEQ_INIT 1
......
@@ -103,7 +103,8 @@ struct nd_btt *to_nd_btt(struct device *dev)
 }
 EXPORT_SYMBOL(to_nd_btt);
 
-static const unsigned long btt_lbasize_supported[] = { 512, 4096, 0 };
+static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
+	4096, 4104, 4160, 4224, 0 };
 
 static ssize_t sector_size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
......
@@ -13,6 +13,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/export.h>
 #include <linux/module.h>
+#include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/ctype.h>
 #include <linux/ndctl.h>
@@ -361,6 +362,42 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+	struct blk_integrity integrity = {
+		.name = "ND-PI-NOP",
+		.generate_fn = nd_pi_nop_generate_verify,
+		.verify_fn = nd_pi_nop_generate_verify,
+		.tuple_size = meta_size,
+		.tag_size = meta_size,
+	};
+	int ret;
+
+	ret = blk_integrity_register(disk, &integrity);
+	if (ret)
+		return ret;
+
+	blk_queue_max_integrity_segments(disk->queue, 1);
+
+	return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+	return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+#endif
+
 static __init int libnvdimm_init(void)
 {
 	int rc;
......
@@ -136,6 +136,7 @@ enum nd_async_mode {
 	ND_ASYNC,
 };
 
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
 void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
......