Commit 451fed24 authored by Dan Williams

Merge branch 'for-5.1/libnvdimm' into libnvdimm-for-next

Merge miscellaneous libnvdimm sub-system updates for v5.1. Highlights
include:

* Support for the Hyper-V family of device-specific methods (DSMs)
* Several fixes and workarounds for Hyper-V compatibility
* Fix for caching the dirty-shutdown-count at init
parents ebe9f6f1 075c3fdd
......@@ -4536,10 +4536,11 @@ S: Maintained
F: drivers/i2c/busses/i2c-diolan-u2c.c
FILESYSTEM DIRECT ACCESS (DAX)
M: Matthew Wilcox <willy@infradead.org>
M: Ross Zwisler <zwisler@kernel.org>
M: Jan Kara <jack@suse.cz>
M: Dan Williams <dan.j.williams@intel.com>
R: Matthew Wilcox <willy@infradead.org>
R: Jan Kara <jack@suse.cz>
L: linux-fsdevel@vger.kernel.org
L: linux-nvdimm@lists.01.org
S: Supported
F: fs/dax.c
F: include/linux/dax.h
......@@ -4547,9 +4548,9 @@ F: include/trace/events/fs_dax.h
DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Ross Zwisler <zwisler@kernel.org>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Keith Busch <keith.busch@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
S: Supported
F: drivers/dax/
......@@ -8643,7 +8644,6 @@ S: Maintained
F: tools/lib/lockdep/
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Ross Zwisler <zwisler@kernel.org>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
......@@ -8656,7 +8656,6 @@ F: drivers/nvdimm/region_devs.c
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Ross Zwisler <zwisler@kernel.org>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
......@@ -8664,7 +8663,6 @@ S: Supported
F: drivers/nvdimm/btt*
LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
M: Ross Zwisler <zwisler@kernel.org>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
......@@ -8683,9 +8681,10 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com>
M: Ross Zwisler <zwisler@kernel.org>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Keith Busch <keith.busch@intel.com>
M: Ira Weiny <ira.weiny@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
......
......@@ -55,6 +55,10 @@ static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);
......@@ -556,6 +560,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return -EINVAL;
}
if (out_obj->type != ACPI_TYPE_BUFFER) {
dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
dimm_name, cmd_name, out_obj->type);
rc = -EINVAL;
goto out;
}
if (call_pkg) {
call_pkg->nd_fw_size = out_obj->buffer.length;
memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
......@@ -574,13 +585,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
if (out_obj->package.type != ACPI_TYPE_BUFFER) {
dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
dimm_name, cmd_name, out_obj->type);
rc = -EINVAL;
goto out;
}
dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
cmd_name, out_obj->buffer.length);
print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
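The relocated check above matters because 'union acpi_object' overlays every possible result type: if firmware (a Hyper-V virtual NVDIMM, for instance) returns something other than a buffer, reading out_obj->buffer.length through the union reinterprets unrelated bits as a length. A minimal sketch of the safe ordering, built on the standard acpi_evaluate_dsm() helper; the helper name and -ENXIO policy below are illustrative, not the driver's exact code:

/* Sketch: evaluate a _DSM and refuse to touch buffer fields unless the
 * result object really is a buffer. */
static int dsm_get_buffer(acpi_handle handle, const guid_t *guid, u64 rev,
        u64 func, union acpi_object *argv4, union acpi_object **out)
{
        union acpi_object *obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);

        if (!obj || obj->type != ACPI_TYPE_BUFFER) {
                ACPI_FREE(obj);         /* kfree-backed, so NULL is fine */
                return -ENXIO;
        }
        *out = obj;     /* obj->buffer.length / .pointer are now meaningful */
        return 0;
}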
......@@ -1761,14 +1765,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
struct device *dev = &nfit_mem->adev->dev;
struct nd_intel_smart smart = { 0 };
union acpi_object in_buf = {
.type = ACPI_TYPE_BUFFER,
.buffer.pointer = (char *) &smart,
.buffer.length = sizeof(smart),
.buffer.type = ACPI_TYPE_BUFFER,
.buffer.length = 0,
};
union acpi_object in_obj = {
.type = ACPI_TYPE_PACKAGE,
.package.type = ACPI_TYPE_PACKAGE,
.package.count = 1,
.package.elements = &in_buf,
};
......@@ -1783,8 +1787,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
return;
out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
if (!out_obj)
if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
|| out_obj->buffer.length < sizeof(smart)) {
dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
dev_name(dev));
ACPI_FREE(out_obj);
return;
}
memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
ACPI_FREE(out_obj);
if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
if (smart.shutdown_state)
......@@ -1795,7 +1806,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
nfit_mem->dirty_shutdown = smart.shutdown_count;
}
ACPI_FREE(out_obj);
}
static void populate_shutdown_status(struct nfit_mem *nfit_mem)
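The shutdown-status rework above follows the same rule and adds a length check: validate the returned object, memcpy() the payload into a local struct, and only then free the ACPI object and interpret the SMART flags. As a standalone sketch (hypothetical helper, not part of the driver):

/* Sketch: copy a fixed-size _DSM payload out of the result, then release it. */
static int dsm_copy_payload(union acpi_object *out_obj, void *dst, size_t len)
{
        if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
                        || out_obj->buffer.length < len) {
                ACPI_FREE(out_obj);             /* NULL-safe */
                return -ENXIO;
        }
        memcpy(dst, out_obj->buffer.pointer, len);
        ACPI_FREE(out_obj);                     /* payload already copied out */
        return 0;
}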
......@@ -1863,9 +1873,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
* Until standardization materializes we need to consider 4
* different command sets. Note, that checking for function0 (bit0)
* tells us if any commands are reachable through this GUID.
* There are 4 "legacy" NVDIMM command sets
* (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
* an EFI working group was established to constrain this
* proliferation. The nfit driver probes for the supported command
* set by GUID. Note, if you're a platform developer looking to add
* a new command set to this probe, consider using an existing set,
* or otherwise seek approval to publish the command set at
* http://www.uefi.org/RFIC_LIST.
*
* Note, that checking for function0 (bit0) tells us if any commands
* are reachable through this GUID.
*/
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
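The expanded comment describes the discovery step: for each candidate family GUID, ask firmware whether function 0 is implemented, since function 0 is the standard _DSM query and its presence means the GUID is live. The loop that starts at the end of this hunk does exactly that; spelled out as a sketch (the real code records the result in nfit_mem->family):

/* Sketch: pick the first command family whose GUID answers function 0. */
int family = -1, i;

for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) {
        /* rev 1, funcs bit 0 => "does this GUID implement anything?" */
        if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
                family = i;
                break;
        }
}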
......@@ -1888,6 +1906,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dsm_mask &= ~(1 << 8);
} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
dsm_mask = 0xffffffff;
} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
dsm_mask = 0x1f;
} else {
dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
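Each recognized family then gets an allow-list of function numbers; the new NVDIMM_FAMILY_HYPERV entry uses 0x1f, i.e. functions 0 through 4. The mask is a ceiling, not a promise: the driver still intersects it with what the firmware actually advertises, roughly like the following sketch (the surrounding code is outside this hunk, so treat the details as approximate; dsm_mask, nfit_mem and adev_dimm come from the enclosing function):

/* Sketch: enable only functions that are both allowed for the family and
 * reported as implemented by firmware. */
const guid_t *guid = to_nfit_uuid(nfit_mem->family);
unsigned long i;

for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
        if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
                set_bit(i, &nfit_mem->dsm_mask);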
......@@ -1917,18 +1937,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
| 1 << ND_CMD_SET_CONFIG_DATA;
if (family == NVDIMM_FAMILY_INTEL
&& (dsm_mask & label_mask) == label_mask)
return 0;
/* skip _LS{I,R,W} enabling */;
else {
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
}
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
}
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
}
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
/*
* Quirk read-only label configurations to preserve
* access to label-less namespaces by default.
*/
if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
&& !force_labels) {
dev_dbg(dev, "%s: No _LSW, disable labels\n",
dev_name(&adev_dimm->dev));
clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
} else
dev_dbg(dev, "%s: Force enable labels\n",
dev_name(&adev_dimm->dev));
}
populate_shutdown_status(nfit_mem);
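The reworked block above is the read-only-label quirk for the _LS method path (the Intel-DSM label path earlier in the block is handled separately): _LSI/_LSR give label reads (NFIT_MEM_LSR) and _LSW gives writes (NFIT_MEM_LSW), but a DIMM that can only read labels now keeps labels disabled so existing label-less namespaces stay reachable, unless the administrator opts in with nfit.force_labels=1 at module load (the parameter is 0444, so it cannot be flipped at runtime). The decision condenses to something like this illustrative helper:

/* Sketch: the effective label-enable policy after the quirk. */
static bool labels_enabled(struct nfit_mem *nfit_mem, bool force_labels)
{
        if (!test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
                return false;           /* no way to read labels at all */
        if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
                return true;            /* full read/write label support */
        return force_labels;            /* read-only labels: opt-in only */
}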
......@@ -2029,6 +2063,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
set_bit(NDD_NOBLK, &flags);
if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
......@@ -2052,7 +2090,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
nvdimm_name(nvdimm),
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
......@@ -3731,6 +3769,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
......
......@@ -34,11 +34,14 @@
/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
......@@ -94,6 +97,7 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
......
......@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
static int btt_freelist_init(struct arena_info *arena)
{
int old, new, ret;
u32 i, map_entry;
struct log_entry log_new, log_old;
int new, ret;
struct log_entry log_new;
u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
......@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
if (old < 0)
return old;
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
/* old and new map entries with any flags stripped out */
log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
/* sub points to the next one to be overwritten */
arena->freelist[i].sub = 1 - new;
arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
arena->freelist[i].block = le32_to_cpu(log_new.old_map);
arena->freelist[i].block = log_oldmap;
/*
* FIXME: if error clearing fails during init, we want to make
* the BTT read-only
*/
if (ent_e_flag(log_new.old_map)) {
if (ent_e_flag(log_new.old_map) &&
!ent_normal(log_new.old_map)) {
arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
......@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena)
}
/* This implies a newly created or untouched flog entry */
if (log_new.old_map == log_new.new_map)
if (log_oldmap == log_newmap)
continue;
/* Check if map recovery is needed */
......@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena)
NULL, NULL, 0);
if (ret)
return ret;
if ((le32_to_cpu(log_new.new_map) != map_entry) &&
(le32_to_cpu(log_new.old_map) == map_entry)) {
/*
* The map_entry from btt_read_map is stripped of any flag bits,
* so use the stripped out versions from the log as well for
* testing whether recovery is needed. For restoration, use the
* 'raw' version of the log entries as that captured what we
* were going to write originally.
*/
if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
/*
* Last transaction wrote the flog, but wasn't able
* to complete the map write. So fix up the map.
......
......@@ -44,6 +44,8 @@
#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
/* 'normal' is both e and z flags set */
#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
enum btt_init_state {
INIT_UNCHECKED = 0,
......
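The btt.c change above stops comparing raw log/map entries, whose top bits carry the 'zero' and 'error' flags, and compares only the LBA portion; ent_normal() (both flags set) marks an ordinary post-write entry, so the freelist error path now fires only for a genuine error rather than for every normal entry. A sketch of the layout these macros rely on; the bit positions are an assumption here (they live in btt.h as MAP_TRIM_MASK/MAP_ERR_MASK), not something this hunk shows:

/* Sketch of an assumed 32-bit map/log entry layout:
 *   bit 31: zero/trim flag, bit 30: error flag, bits 29:0: post-map LBA. */
#define SK_TRIM_MASK    (1U << 31)
#define SK_ERR_MASK     (1U << 30)
#define SK_LBA_MASK     (~(SK_TRIM_MASK | SK_ERR_MASK))

static inline u32 sk_ent_lba(u32 ent)           /* mirrors ent_lba() */
{
        return ent & SK_LBA_MASK;
}

static inline bool sk_ent_normal(u32 ent)       /* mirrors ent_normal() */
{
        return (ent & SK_TRIM_MASK) && (ent & SK_ERR_MASK);
}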
......@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
static ssize_t log_zero_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Y\n");
}
static DEVICE_ATTR_RO(log_zero_flags);
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_size.attr,
&dev_attr_log_zero_flags.attr,
NULL,
};
......
......@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
......@@ -25,6 +26,10 @@
static DEFINE_IDA(dimm_ida);
static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
......@@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
if (noblk)
flags |= 1 << NDD_NOBLK;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
......
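NDD_NOBLK ties the pieces of this series together: the nfit quirk sets it for NVDIMM_FAMILY_HYPERV DIMMs, the new libnvdimm 'noblk' parameter can force it for every DIMM, label parsing then drops NSLABEL_FLAG_LOCAL so BLK-aliased capacity is treated as plain PMEM, and region creation refuses to put a BLK region on such a DIMM (see the region_devs.c hunk later in this diff). A consumer-side sketch, with the helper name being illustrative:

/* Sketch: a label-parsing path honoring NDD_NOBLK. */
static u32 effective_label_flags(struct nvdimm *nvdimm, u32 label_flags)
{
        if (test_bit(NDD_NOBLK, &nvdimm->flags))
                label_flags &= ~NSLABEL_FLAG_LOCAL;     /* no BLK aliasing */
        return label_flags;
}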
......@@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0; /* no label, nothing to reserve */
for_each_clear_bit_le(slot, free, nslot) {
struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
struct nd_namespace_label *nd_label;
struct nd_region *nd_region = NULL;
u8 label_uuid[NSLABEL_UUID_LEN];
......@@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
flags = __le32_to_cpu(nd_label->flags);
if (test_bit(NDD_NOBLK, &nvdimm->flags))
flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
__le64_to_cpu(nd_label->dpa),
......@@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos)
int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
......@@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
if (nspm->alt_name)
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
nd_label->flags = __cpu_to_le32(flags);
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
......@@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
int i;
int i, rc;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
int rc, count = 0;
int count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
......@@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
if (rc < 0)
return rc;
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
NSLABEL_FLAG_UPDATING);
if (rc)
return rc;
}
if (size == 0)
return 0;
/* Clear the UPDATING flag per UEFI 2.7 expectations */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
if (rc)
return rc;
}
......
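The label.c hunk above implements the UEFI 2.7 update sequence: first every mapping's label is written with NSLABEL_FLAG_UPDATING set, and only after all of them succeed is each label rewritten with the flag cleared, so a crash mid-update leaves a detectable marker instead of a silently inconsistent label set. In outline (this restates the control flow above, it is not additional driver code):

/* Sketch: two-pass label commit. */
for (i = 0; i < nd_region->ndr_mappings; i++) {
        rc = __pmem_label_update(nd_region, &nd_region->mapping[i],
                        nspm, i, NSLABEL_FLAG_UPDATING);        /* pass 1 */
        if (rc)
                return rc;
}
for (i = 0; i < nd_region->ndr_mappings; i++) {
        rc = __pmem_label_update(nd_region, &nd_region->mapping[i],
                        nspm, i, 0);                            /* pass 2 */
        if (rc)
                return rc;
}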
......@@ -1506,13 +1506,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
if (dev->driver || ndns->claim)
return -EBUSY;
if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0)
if (sysfs_streq(buf, "btt"))
ndns->claim_class = btt_claim_class(dev);
else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0)
else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0)
else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0)
else if (sysfs_streq(buf, ""))
ndns->claim_class = NVDIMM_CCLASS_NONE;
else
return -EINVAL;
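sysfs_streq() exists precisely for this pattern: it compares like strcmp() but also accepts a single trailing newline, which is what a value echoed into a sysfs attribute usually carries, so each pair of strcmp() calls collapses into one test. For instance (a sketch, not part of the driver):

/* Sketch: sysfs_streq() matches the keyword with or without echo's newline. */
static bool wants_btt(const char *buf)
{
        return sysfs_streq(buf, "btt"); /* true for "btt" and "btt\n" */
}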
......@@ -2492,6 +2492,12 @@ static int init_active_labels(struct nd_region *nd_region)
if (!label_ent)
break;
label = nd_label_active(ndd, j);
if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
u32 flags = __le32_to_cpu(label->flags);
flags &= ~NSLABEL_FLAG_LOCAL;
label->flags = __cpu_to_le32(flags);
}
label_ent->label = label;
mutex_lock(&nd_mapping->lock);
......
......@@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = {
.remove = of_pmem_region_remove,
.driver = {
.name = "of_pmem",
.owner = THIS_MODULE,
.of_match_table = of_pmem_region_match,
},
};
......
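Dropping '.owner = THIS_MODULE' from of_pmem is not a behavior change: platform_driver_register() already supplies the owner on the caller's behalf. In current kernels it is a macro along these lines (quoted from memory, so treat it as approximate):

/* include/linux/platform_device.h (approximate) */
#define platform_driver_register(drv) \
        __platform_driver_register(drv, THIS_MODULE)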
......@@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
if (test_bit(NDD_NOBLK, &nvdimm->flags)
&& dev_type == &nd_blk_device_type) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
caller, dev_name(&nvdimm->dev), i);
return NULL;
}
}
if (dev_type == &nd_blk_device_type) {
......
......@@ -42,6 +42,8 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
/* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
NDD_NOBLK = 5,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
......
......@@ -243,6 +243,7 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_HPE1 1
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
#define NVDIMM_FAMILY_HYPERV 4
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
......
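With NVDIMM_FAMILY_HYPERV added to the uapi header, userspace can reach the Hyper-V functions through the same ND_CMD_CALL passthrough used for the other families. A hedged userspace sketch; the /dev/nmem0 path, the function number, and the output size are placeholders, and the real payload layout comes from the Hyper-V _DSM specification rather than from this diff:

/* Userspace sketch: issue a family-specific _DSM via ND_CMD_CALL. */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ndctl.h>

int hyperv_dsm_example(void)
{
        size_t out_size = 128;                  /* placeholder */
        struct nd_cmd_pkg *pkg;
        int fd, rc;

        pkg = calloc(1, sizeof(*pkg) + out_size);
        if (!pkg)
                return -1;
        pkg->nd_family = NVDIMM_FAMILY_HYPERV;
        pkg->nd_command = 1;                    /* placeholder function */
        pkg->nd_size_in = 0;                    /* no input payload here */
        pkg->nd_size_out = out_size;

        fd = open("/dev/nmem0", O_RDWR);        /* placeholder DIMM device */
        if (fd < 0) {
                free(pkg);
                return -1;
        }
        rc = ioctl(fd, ND_IOCTL_CALL, pkg);
        /* on success pkg->nd_fw_size is the length firmware returned */
        close(fd);
        free(pkg);
        return rc;
}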