Commit 239467e8 authored by Linus Torvalds

Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm fixes from Dan Williams:
 "Three fixes, the first two are tagged for -stable:

   - The ndctl utility/library gained expanded unit tests illuminating a
     long standing bug in the libnvdimm SMART data retrieval
     implementation.

     It has been broken since its initial implementation, now fixed.

   - Another one line fix for the detection of stale info blocks.

     Without this change userspace can get into a situation where it is
     unable to reconfigure a namespace.

   - Fix the badblock initialization path in the presence of the new (in
     v4.6-rc1) section alignment workarounds.

     Without this change badblocks will be reported at the wrong offset.

  These have received a build success report from the kbuild robot and
  have appeared in -next with no reported issues"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm, pfn: fix nvdimm_namespace_add_poison() vs section alignment
  libnvdimm, pfn: fix uuid validation
  libnvdimm: fix smart data retrieval
parents 289b7bfd a3901802
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
         [ND_CMD_IMPLEMENTED] = { },
         [ND_CMD_SMART] = {
                 .out_num = 2,
-                .out_sizes = { 4, 8, },
+                .out_sizes = { 4, 128, },
         },
         [ND_CMD_SMART_THRESHOLD] = {
                 .out_num = 2,
...
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
         set_badblock(bb, start_sector, num_sectors);
 }
 
-static void namespace_add_poison(struct list_head *poison_list,
-                struct badblocks *bb, struct resource *res)
+static void badblocks_populate(struct list_head *poison_list,
+                struct badblocks *bb, const struct resource *res)
 {
         struct nd_poison *pl;
 
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
 }
 
 /**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns: the namespace containing poison ranges
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
  * @bb: badblocks instance to populate
- * @offset: offset at the start of the namespace before 'sector 0'
+ * @res: resource range to consider
  *
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges. Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
  */
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-                struct badblocks *bb, resource_size_t offset)
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+                struct badblocks *bb, const struct resource *res)
 {
-        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
         struct nvdimm_bus *nvdimm_bus;
         struct list_head *poison_list;
-        struct resource res = {
-                .start = nsio->res.start + offset,
-                .end = nsio->res.end,
-        };
 
-        nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+        if (!is_nd_pmem(&nd_region->dev)) {
+                dev_WARN_ONCE(&nd_region->dev, 1,
+                                "%s only valid for pmem regions\n", __func__);
+                return;
+        }
+        nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
         poison_list = &nvdimm_bus->poison_list;
 
         nvdimm_bus_lock(&nvdimm_bus->dev);
-        namespace_add_poison(poison_list, bb, &res);
+        badblocks_populate(poison_list, bb, res);
         nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
 static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
...
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                 char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-                struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+                struct badblocks *bb, const struct resource *res);
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
...
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
         } else {
                 /* from init we validate */
                 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
-                        return -EINVAL;
+                        return -ENODEV;
         }
 
         if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
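Why the errno matters, in a minimal hypothetical sketch (claim_pfn() is invented here and is not code from this series): treating a uuid mismatch as -ENODEV marks the info block as stale and leaves the namespace free to be reconfigured, whereas -EINVAL surfaced as a hard error that left the namespace stuck.

/* Hypothetical caller, for illustration only. */
static int claim_pfn(struct nd_pfn *nd_pfn)
{
        int rc = nd_pfn_validate(nd_pfn);

        if (rc == -ENODEV)
                return 0;       /* stale/absent info block: do not claim,
                                 * the namespace stays reconfigurable */
        return rc;              /* other errors remain fatal */
}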
...
@@ -244,7 +244,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
 static int pmem_attach_disk(struct device *dev,
                 struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
         int nid = dev_to_node(dev);
+        struct resource bb_res;
         struct gendisk *disk;
 
         blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +273,17 @@ static int pmem_attach_disk(struct device *dev,
                 devm_exit_badblocks(dev, &pmem->bb);
         if (devm_init_badblocks(dev, &pmem->bb))
                 return -ENOMEM;
-        nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
-
+        bb_res.start = nsio->res.start + pmem->data_offset;
+        bb_res.end = nsio->res.end;
+        if (is_nd_pfn(dev)) {
+                struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+                bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+                bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+        }
+        nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+                        &bb_res);
         disk->bb = &pmem->bb;
         add_disk(disk);
         revalidate_disk(disk);
@@ -553,7 +564,7 @@ static int nd_pmem_probe(struct device *dev)
         ndns->rw_bytes = pmem_rw_bytes;
         if (devm_init_badblocks(dev, &pmem->bb))
                 return -ENOMEM;
-        nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+        nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
 
         if (is_nd_btt(dev)) {
                 /* btt allocates its own request_queue */
@@ -595,14 +606,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
         struct pmem_device *pmem = dev_get_drvdata(dev);
         struct nd_namespace_common *ndns = pmem->ndns;
+        struct nd_region *nd_region = to_nd_region(dev->parent);
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+        struct resource res = {
+                .start = nsio->res.start + pmem->data_offset,
+                .end = nsio->res.end,
+        };
 
         if (event != NVDIMM_REVALIDATE_POISON)
                 return;
 
-        if (is_nd_btt(dev))
-                nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
-        else
-                nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+        if (is_nd_pfn(dev)) {
+                struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+                res.start += __le32_to_cpu(pfn_sb->start_pad);
+                res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+        }
+
+        nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 
 MODULE_ALIAS("pmem");
...
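The start_pad/end_trunc handling in the pmem.c hunks is plain range arithmetic: the badblocks range must be clipped to the section-aligned span that the pfn device actually maps, otherwise bad blocks are reported at the wrong offset. A standalone sketch of that adjustment with invented values (none of the numbers below come from the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the bb_res adjustment done in pmem_attach_disk() above; all
 * numeric values are made up for the example.
 */
int main(void)
{
        uint64_t ns_start = 0x100000000ULL;     /* nsio->res.start */
        uint64_t ns_end = 0x13fffffffULL;       /* nsio->res.end */
        uint64_t data_offset = 0x200000ULL;     /* pmem->data_offset */
        uint32_t start_pad = 0x4000000;         /* pfn_sb->start_pad */
        uint32_t end_trunc = 0x2000000;         /* pfn_sb->end_trunc */

        uint64_t bb_start = ns_start + data_offset + start_pad;
        uint64_t bb_end = ns_end - end_trunc;

        printf("badblocks range: %#llx-%#llx\n",
                        (unsigned long long)bb_start,
                        (unsigned long long)bb_end);
        return 0;
}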