Commit 1ed41b56 authored by Dan Williams

Merge branch 'for-4.17/libnvdimm' into libnvdimm-for-next

parents 3eb2ce82 291717b6
Device-tree bindings for persistent memory regions
-----------------------------------------------------
Persistent memory refers to a class of memory devices that are:

 a) Usable as main system memory (i.e. cacheable), and
 b) Able to retain their contents across power failure.

Given b) it is best to think of persistent memory as a kind of memory-mapped
storage device. To ensure data integrity the operating system needs to manage
persistent regions separately from the normal memory pool. To aid with that,
this binding provides a standardised interface for discovering where
persistent memory regions exist inside the physical address space.
Bindings for the region nodes:
-----------------------------
Required properties:
 - compatible = "pmem-region"

 - reg = <base, size>;
	The reg property should specify an address range that is
	translatable to a system physical address range. This address
	range should be mappable as normal system memory would be
	(i.e. cacheable).

	If the reg property contains multiple address ranges, each
	address range is treated as though it were specified in a
	separate device node. Having multiple address ranges in a
	node implies no special relationship between the ranges.
Optional properties:
 - Any relevant NUMA associativity properties for the target platform.

 - volatile; This property indicates that this region is actually
	backed by non-persistent memory. This lets the OS know that it
	may skip the cache flushes required to ensure data is made
	persistent after a write.

	If this property is absent then the OS must assume that the region
	is backed by non-volatile memory.
Examples:
--------------------

	/*
	 * This node specifies one 4KB region spanning from
	 * 0x5000 to 0x5fff that is backed by non-volatile memory.
	 */
	pmem@5000 {
		compatible = "pmem-region";
		reg = <0x00005000 0x00001000>;
	};

	/*
	 * This node specifies two 4KB regions that are backed by
	 * volatile (normal) memory.
	 */
	pmem@6000 {
		compatible = "pmem-region";
		reg = < 0x00006000 0x00001000
			0x00008000 0x00001000 >;
		volatile;
	};
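
As a further, illustrative-only sketch (the unit address, node id, and use of
the generic numa-node-id property are assumptions, not part of the binding
text above), a region's NUMA association might be described like this on
platforms that use the common NUMA device-tree binding; platforms with their
own associativity properties would use those instead:

	/*
	 * A 4KB non-volatile region associated with NUMA node 1.
	 */
	pmem@a000 {
		compatible = "pmem-region";
		reg = <0x0000a000 0x00001000>;
		numa-node-id = <1>;
	};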
...@@ -8035,6 +8035,14 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ ...@@ -8035,6 +8035,14 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported S: Supported
F: drivers/nvdimm/pmem* F: drivers/nvdimm/pmem*
LIBNVDIMM: DEVICETREE BINDINGS
M: Oliver O'Halloran <oohall@gmail.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/of_pmem.c
F: Documentation/devicetree/bindings/pmem/pmem-region.txt
LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com> M: Dan Williams <dan.j.williams@intel.com>
L: linux-nvdimm@lists.01.org L: linux-nvdimm@lists.01.org
......
...@@ -821,6 +821,9 @@ static int __init opal_init(void) ...@@ -821,6 +821,9 @@ static int __init opal_init(void)
/* Create i2c platform devices */ /* Create i2c platform devices */
opal_pdev_init("ibm,opal-i2c"); opal_pdev_init("ibm,opal-i2c");
/* Handle non-volatile memory devices */
opal_pdev_init("pmem-region");
/* Setup a heatbeat thread if requested by OPAL */ /* Setup a heatbeat thread if requested by OPAL */
opal_init_heartbeat(); opal_init_heartbeat();
......
This diff is collapsed.
...@@ -51,9 +51,8 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, ...@@ -51,9 +51,8 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
if ((spa->address + spa->length - 1) < mce->addr) if ((spa->address + spa->length - 1) < mce->addr)
continue; continue;
found_match = 1; found_match = 1;
dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n", dev_dbg(dev, "addr in SPA %d (0x%llx, 0x%llx)\n",
__func__, spa->range_index, spa->address, spa->range_index, spa->address, spa->length);
spa->length);
/* /*
* We can break at the first match because we're going * We can break at the first match because we're going
* to rescan all the SPA ranges. There shouldn't be any * to rescan all the SPA ranges. There shouldn't be any
......
...@@ -117,10 +117,17 @@ enum nfit_dimm_notifiers { ...@@ -117,10 +117,17 @@ enum nfit_dimm_notifiers {
NFIT_NOTIFY_DIMM_HEALTH = 0x81, NFIT_NOTIFY_DIMM_HEALTH = 0x81,
}; };
enum nfit_ars_state {
ARS_REQ,
ARS_DONE,
ARS_SHORT,
ARS_FAILED,
};
struct nfit_spa { struct nfit_spa {
struct list_head list; struct list_head list;
struct nd_region *nd_region; struct nd_region *nd_region;
unsigned int ars_required:1; unsigned long ars_state;
u32 clear_err_unit; u32 clear_err_unit;
u32 max_ars; u32 max_ars;
struct acpi_nfit_system_address spa[0]; struct acpi_nfit_system_address spa[0];
...@@ -171,9 +178,8 @@ struct nfit_mem { ...@@ -171,9 +178,8 @@ struct nfit_mem {
struct resource *flush_wpq; struct resource *flush_wpq;
unsigned long dsm_mask; unsigned long dsm_mask;
int family; int family;
u32 has_lsi:1; bool has_lsr;
u32 has_lsr:1; bool has_lsw;
u32 has_lsw:1;
}; };
struct acpi_nfit_desc { struct acpi_nfit_desc {
...@@ -191,18 +197,18 @@ struct acpi_nfit_desc { ...@@ -191,18 +197,18 @@ struct acpi_nfit_desc {
struct device *dev; struct device *dev;
u8 ars_start_flags; u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status; struct nd_cmd_ars_status *ars_status;
size_t ars_status_size; struct delayed_work dwork;
struct work_struct work;
struct list_head list; struct list_head list;
struct kernfs_node *scrub_count_state; struct kernfs_node *scrub_count_state;
unsigned int max_ars;
unsigned int scrub_count; unsigned int scrub_count;
unsigned int scrub_mode; unsigned int scrub_mode;
unsigned int cancel:1; unsigned int cancel:1;
unsigned int init_complete:1;
unsigned long dimm_cmd_force_en; unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en; unsigned long bus_cmd_force_en;
unsigned long bus_nfit_cmd_force_en; unsigned long bus_nfit_cmd_force_en;
unsigned int platform_cap; unsigned int platform_cap;
unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw); void *iobuf, u64 len, int rw);
}; };
...@@ -244,7 +250,7 @@ struct nfit_blk { ...@@ -244,7 +250,7 @@ struct nfit_blk {
extern struct list_head acpi_descs; extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock; extern struct mutex acpi_desc_lock;
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags); int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
void nfit_mce_register(void); void nfit_mce_register(void);
......
...@@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
dax_region = dev_dax->region; dax_region = dev_dax->region;
if (dax_region->align > PAGE_SIZE) { if (dax_region->align > PAGE_SIZE) {
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
__func__, dax_region->align, fault_size); dax_region->align, fault_size);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
if (phys == -1) { if (phys == -1) {
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
vmf->pgoff);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
dax_region = dev_dax->region; dax_region = dev_dax->region;
if (dax_region->align > PMD_SIZE) { if (dax_region->align > PMD_SIZE) {
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
__func__, dax_region->align, fault_size); dax_region->align, fault_size);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
/* dax pmd mappings require pfn_t_devmap() */ /* dax pmd mappings require pfn_t_devmap() */
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__); dev_dbg(dev, "region lacks devmap flags\n");
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
pgoff = linear_page_index(vmf->vma, pmd_addr); pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE); phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
if (phys == -1) { if (phys == -1) {
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
pgoff);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
dax_region = dev_dax->region; dax_region = dev_dax->region;
if (dax_region->align > PUD_SIZE) { if (dax_region->align > PUD_SIZE) {
dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n", dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
__func__, dax_region->align, fault_size); dax_region->align, fault_size);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
/* dax pud mappings require pfn_t_devmap() */ /* dax pud mappings require pfn_t_devmap() */
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
dev_dbg(dev, "%s: region lacks devmap flags\n", __func__); dev_dbg(dev, "region lacks devmap flags\n");
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf) ...@@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
pgoff = linear_page_index(vmf->vma, pud_addr); pgoff = linear_page_index(vmf->vma, pud_addr);
phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE); phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
if (phys == -1) { if (phys == -1) {
dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
pgoff);
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
...@@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf, ...@@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
struct file *filp = vmf->vma->vm_file; struct file *filp = vmf->vma->vm_file;
struct dev_dax *dev_dax = filp->private_data; struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__, dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
current->comm, (vmf->flags & FAULT_FLAG_WRITE) (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end, pe_size); vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
id = dax_read_lock(); id = dax_read_lock();
...@@ -450,7 +446,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -450,7 +446,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
struct dev_dax *dev_dax = filp->private_data; struct dev_dax *dev_dax = filp->private_data;
int rc, id; int rc, id;
dev_dbg(&dev_dax->dev, "%s\n", __func__); dev_dbg(&dev_dax->dev, "trace\n");
/* /*
* We lock to check dax_dev liveness and will re-check at * We lock to check dax_dev liveness and will re-check at
...@@ -508,7 +504,7 @@ static int dax_open(struct inode *inode, struct file *filp) ...@@ -508,7 +504,7 @@ static int dax_open(struct inode *inode, struct file *filp)
struct inode *__dax_inode = dax_inode(dax_dev); struct inode *__dax_inode = dax_inode(dax_dev);
struct dev_dax *dev_dax = dax_get_private(dax_dev); struct dev_dax *dev_dax = dax_get_private(dax_dev);
dev_dbg(&dev_dax->dev, "%s\n", __func__); dev_dbg(&dev_dax->dev, "trace\n");
inode->i_mapping = __dax_inode->i_mapping; inode->i_mapping = __dax_inode->i_mapping;
inode->i_mapping->host = __dax_inode; inode->i_mapping->host = __dax_inode;
filp->f_mapping = inode->i_mapping; filp->f_mapping = inode->i_mapping;
...@@ -523,7 +519,7 @@ static int dax_release(struct inode *inode, struct file *filp) ...@@ -523,7 +519,7 @@ static int dax_release(struct inode *inode, struct file *filp)
{ {
struct dev_dax *dev_dax = filp->private_data; struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "%s\n", __func__); dev_dbg(&dev_dax->dev, "trace\n");
return 0; return 0;
} }
...@@ -565,7 +561,7 @@ static void unregister_dev_dax(void *dev) ...@@ -565,7 +561,7 @@ static void unregister_dev_dax(void *dev)
struct inode *inode = dax_inode(dax_dev); struct inode *inode = dax_inode(dax_dev);
struct cdev *cdev = inode->i_cdev; struct cdev *cdev = inode->i_cdev;
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "trace\n");
kill_dev_dax(dev_dax); kill_dev_dax(dev_dax);
cdev_device_del(cdev, dev); cdev_device_del(cdev, dev);
......
...@@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref) ...@@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref)
{ {
struct dax_pmem *dax_pmem = to_dax_pmem(ref); struct dax_pmem *dax_pmem = to_dax_pmem(ref);
dev_dbg(dax_pmem->dev, "%s\n", __func__); dev_dbg(dax_pmem->dev, "trace\n");
complete(&dax_pmem->cmp); complete(&dax_pmem->cmp);
} }
...@@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data) ...@@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
struct percpu_ref *ref = data; struct percpu_ref *ref = data;
struct dax_pmem *dax_pmem = to_dax_pmem(ref); struct dax_pmem *dax_pmem = to_dax_pmem(ref);
dev_dbg(dax_pmem->dev, "%s\n", __func__); dev_dbg(dax_pmem->dev, "trace\n");
wait_for_completion(&dax_pmem->cmp); wait_for_completion(&dax_pmem->cmp);
percpu_ref_exit(ref); percpu_ref_exit(ref);
} }
...@@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data) ...@@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
struct percpu_ref *ref = data; struct percpu_ref *ref = data;
struct dax_pmem *dax_pmem = to_dax_pmem(ref); struct dax_pmem *dax_pmem = to_dax_pmem(ref);
dev_dbg(dax_pmem->dev, "%s\n", __func__); dev_dbg(dax_pmem->dev, "trace\n");
percpu_ref_kill(ref); percpu_ref_kill(ref);
} }
...@@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = { ...@@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = {
.type = ND_DRIVER_DAX_PMEM, .type = ND_DRIVER_DAX_PMEM,
}; };
static int __init dax_pmem_init(void) module_nd_driver(dax_pmem_driver);
{
return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_init);
static void __exit dax_pmem_exit(void)
{
driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_exit);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
......
...@@ -102,4 +102,15 @@ config NVDIMM_DAX ...@@ -102,4 +102,15 @@ config NVDIMM_DAX
Select Y if unsure Select Y if unsure
config OF_PMEM
	# FIXME: make tristate once OF_NUMA dependency removed
	bool "Device-tree support for persistent memory regions"
	depends on OF
	default LIBNVDIMM
	help
	  Allows regions of persistent memory to be described in the
	  device-tree.

	  Select Y if unsure.
endif endif
...@@ -4,6 +4,7 @@ obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o ...@@ -4,6 +4,7 @@ obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
nd_pmem-y := pmem.o nd_pmem-y := pmem.o
......
...@@ -26,7 +26,7 @@ static void nd_btt_release(struct device *dev) ...@@ -26,7 +26,7 @@ static void nd_btt_release(struct device *dev)
struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_btt *nd_btt = to_nd_btt(dev); struct nd_btt *nd_btt = to_nd_btt(dev);
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns); nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
ida_simple_remove(&nd_region->btt_ida, nd_btt->id); ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
kfree(nd_btt->uuid); kfree(nd_btt->uuid);
...@@ -74,8 +74,8 @@ static ssize_t sector_size_store(struct device *dev, ...@@ -74,8 +74,8 @@ static ssize_t sector_size_store(struct device *dev,
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported); btt_lbasize_supported);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -101,8 +101,8 @@ static ssize_t uuid_store(struct device *dev, ...@@ -101,8 +101,8 @@ static ssize_t uuid_store(struct device *dev,
device_lock(dev); device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
device_unlock(dev); device_unlock(dev);
return rc ? rc : len; return rc ? rc : len;
...@@ -131,8 +131,8 @@ static ssize_t namespace_store(struct device *dev, ...@@ -131,8 +131,8 @@ static ssize_t namespace_store(struct device *dev,
device_lock(dev); device_lock(dev);
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -206,8 +206,8 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, ...@@ -206,8 +206,8 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev->groups = nd_btt_attribute_groups; dev->groups = nd_btt_attribute_groups;
device_initialize(&nd_btt->dev); device_initialize(&nd_btt->dev);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) { if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
__func__, dev_name(ndns->claim)); dev_name(ndns->claim));
put_device(dev); put_device(dev);
return NULL; return NULL;
} }
...@@ -346,8 +346,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -346,8 +346,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENOMEM; return -ENOMEM;
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL); btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb); rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
dev_dbg(dev, "%s: btt: %s\n", __func__, dev_dbg(dev, "btt: %s\n", rc == 0 ? dev_name(btt_dev) : "<none>");
rc == 0 ? dev_name(btt_dev) : "<none>");
if (rc < 0) { if (rc < 0) {
struct nd_btt *nd_btt = to_nd_btt(btt_dev); struct nd_btt *nd_btt = to_nd_btt(btt_dev);
......
...@@ -358,6 +358,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, ...@@ -358,6 +358,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
nvdimm_bus->dev.release = nvdimm_bus_release; nvdimm_bus->dev.release = nvdimm_bus_release;
nvdimm_bus->dev.groups = nd_desc->attr_groups; nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type; nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id); dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
rc = device_register(&nvdimm_bus->dev); rc = device_register(&nvdimm_bus->dev);
if (rc) { if (rc) {
...@@ -984,8 +985,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -984,8 +985,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (cmd == ND_CMD_CALL) { if (cmd == ND_CMD_CALL) {
func = pkg.nd_command; func = pkg.nd_command;
dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n", dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
__func__, dimm_name, pkg.nd_command, dimm_name, pkg.nd_command,
in_len, out_len, buf_len); in_len, out_len, buf_len);
} }
...@@ -996,8 +997,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -996,8 +997,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
u32 copy; u32 copy;
if (out_size == UINT_MAX) { if (out_size == UINT_MAX) {
dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n", dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i); dimm_name, cmd_name, i);
return -EFAULT; return -EFAULT;
} }
if (out_len < sizeof(out_env)) if (out_len < sizeof(out_env))
...@@ -1012,9 +1013,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -1012,9 +1013,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
buf_len = (u64) out_len + (u64) in_len; buf_len = (u64) out_len + (u64) in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) { if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__, dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
dimm_name, cmd_name, buf_len, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
ND_IOCTL_MAX_BUFLEN);
return -EINVAL; return -EINVAL;
} }
......
...@@ -148,7 +148,7 @@ ssize_t nd_namespace_store(struct device *dev, ...@@ -148,7 +148,7 @@ ssize_t nd_namespace_store(struct device *dev,
char *name; char *name;
if (dev->driver) { if (dev->driver) {
dev_dbg(dev, "%s: -EBUSY\n", __func__); dev_dbg(dev, "namespace already active\n");
return -EBUSY; return -EBUSY;
} }
......
...@@ -134,7 +134,7 @@ static void nvdimm_map_release(struct kref *kref) ...@@ -134,7 +134,7 @@ static void nvdimm_map_release(struct kref *kref)
nvdimm_map = container_of(kref, struct nvdimm_map, kref); nvdimm_map = container_of(kref, struct nvdimm_map, kref);
nvdimm_bus = nvdimm_map->nvdimm_bus; nvdimm_bus = nvdimm_map->nvdimm_bus;
dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset); dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
list_del(&nvdimm_map->list); list_del(&nvdimm_map->list);
if (nvdimm_map->flags) if (nvdimm_map->flags)
memunmap(nvdimm_map->mem); memunmap(nvdimm_map->mem);
...@@ -230,8 +230,8 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf, ...@@ -230,8 +230,8 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
if (!isxdigit(str[0]) || !isxdigit(str[1])) { if (!isxdigit(str[0]) || !isxdigit(str[1])) {
dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n", dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
__func__, i, str - buf, str[0], i, str - buf, str[0],
str + 1 - buf, str[1]); str + 1 - buf, str[1]);
return -EINVAL; return -EINVAL;
} }
......
...@@ -24,7 +24,7 @@ static void nd_dax_release(struct device *dev) ...@@ -24,7 +24,7 @@ static void nd_dax_release(struct device *dev)
struct nd_dax *nd_dax = to_nd_dax(dev); struct nd_dax *nd_dax = to_nd_dax(dev);
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "trace\n");
nd_detach_ndns(dev, &nd_pfn->ndns); nd_detach_ndns(dev, &nd_pfn->ndns);
ida_simple_remove(&nd_region->dax_ida, nd_pfn->id); ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
kfree(nd_pfn->uuid); kfree(nd_pfn->uuid);
...@@ -129,8 +129,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -129,8 +129,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb; nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG); rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "%s: dax: %s\n", __func__, dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
rc == 0 ? dev_name(dax_dev) : "<none>");
if (rc < 0) { if (rc < 0) {
nd_detach_ndns(dax_dev, &nd_pfn->ndns); nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev); put_device(dax_dev);
......
...@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev) ...@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
ndd->ns_next = nd_label_next_nsindex(ndd->ns_current); ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
nd_label_copy(ndd, to_next_namespace_index(ndd), nd_label_copy(ndd, to_next_namespace_index(ndd),
to_current_namespace_index(ndd)); to_current_namespace_index(ndd));
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd); rc = nd_label_reserve_dpa(ndd);
if (ndd->ns_current >= 0) if (rc == 0)
nvdimm_set_aliasing(dev); nvdimm_set_aliasing(dev);
}
nvdimm_clear_locked(dev); nvdimm_clear_locked(dev);
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
......
...@@ -131,7 +131,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) ...@@ -131,7 +131,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
} }
memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
} }
dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc); dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
kfree(cmd); kfree(cmd);
return rc; return rc;
...@@ -266,8 +266,7 @@ void nvdimm_drvdata_release(struct kref *kref) ...@@ -266,8 +266,7 @@ void nvdimm_drvdata_release(struct kref *kref)
struct device *dev = ndd->dev; struct device *dev = ndd->dev;
struct resource *res, *_r; struct resource *res, *_r;
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "trace\n");
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
for_each_dpa_resource_safe(ndd, res, _r) for_each_dpa_resource_safe(ndd, res, _r)
nvdimm_free_dpa(ndd, res); nvdimm_free_dpa(ndd, res);
...@@ -660,7 +659,7 @@ int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count) ...@@ -660,7 +659,7 @@ int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
nd_synchronize(); nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, &count, count_dimms); device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count); dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
if (count != dimm_count) if (count != dimm_count)
return -ENXIO; return -ENXIO;
return 0; return 0;
......
...@@ -45,9 +45,27 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd) ...@@ -45,9 +45,27 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
return ndd->nslabel_size; return ndd->nslabel_size;
} }
static size_t __sizeof_namespace_index(u32 nslot)
{
return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
size_t index_size)
{
return (ndd->nsarea.config_size - index_size * 2) /
sizeof_namespace_label(ndd);
}
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd) int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{ {
return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1); u32 tmp_nslot, n;
tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
} }
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
...@@ -55,18 +73,14 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd) ...@@ -55,18 +73,14 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
u32 nslot, space, size; u32 nslot, space, size;
/* /*
* The minimum index space is 512 bytes, with that amount of * Per UEFI 2.7, the minimum size of the Label Storage Area is large
* index we can describe ~1400 labels which is less than a byte * enough to hold 2 index blocks and 2 labels. The minimum index
* of overhead per label. Round up to a byte of overhead per * block size is 256 bytes, and the minimum label size is 256 bytes.
* label and determine the size of the index region. Yes, this
* starts to waste space at larger config_sizes, but it's
* unlikely we'll ever see anything but 128K.
*/ */
nslot = nvdimm_num_label_slots(ndd); nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd); space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8), size = __sizeof_namespace_index(nslot) * 2;
NSINDEX_ALIGN) * 2; if (size <= space && nslot >= 2)
if (size <= space)
return size / 2; return size / 2;
dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n", dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
...@@ -121,8 +135,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd) ...@@ -121,8 +135,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN); memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) { if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
dev_dbg(dev, "%s: nsindex%d signature invalid\n", dev_dbg(dev, "nsindex%d signature invalid\n", i);
__func__, i);
continue; continue;
} }
...@@ -135,8 +148,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd) ...@@ -135,8 +148,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
labelsize = 128; labelsize = 128;
if (labelsize != sizeof_namespace_label(ndd)) { if (labelsize != sizeof_namespace_label(ndd)) {
dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n", dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
__func__, i, nsindex[i]->labelsize); i, nsindex[i]->labelsize);
continue; continue;
} }
...@@ -145,30 +158,28 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd) ...@@ -145,30 +158,28 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1); sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
nsindex[i]->checksum = __cpu_to_le64(sum_save); nsindex[i]->checksum = __cpu_to_le64(sum_save);
if (sum != sum_save) { if (sum != sum_save) {
dev_dbg(dev, "%s: nsindex%d checksum invalid\n", dev_dbg(dev, "nsindex%d checksum invalid\n", i);
__func__, i);
continue; continue;
} }
seq = __le32_to_cpu(nsindex[i]->seq); seq = __le32_to_cpu(nsindex[i]->seq);
if ((seq & NSINDEX_SEQ_MASK) == 0) { if ((seq & NSINDEX_SEQ_MASK) == 0) {
dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n", dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
__func__, i, seq);
continue; continue;
} }
/* sanity check the index against expected values */ /* sanity check the index against expected values */
if (__le64_to_cpu(nsindex[i]->myoff) if (__le64_to_cpu(nsindex[i]->myoff)
!= i * sizeof_namespace_index(ndd)) { != i * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n", dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
__func__, i, (unsigned long long) i, (unsigned long long)
__le64_to_cpu(nsindex[i]->myoff)); __le64_to_cpu(nsindex[i]->myoff));
continue; continue;
} }
if (__le64_to_cpu(nsindex[i]->otheroff) if (__le64_to_cpu(nsindex[i]->otheroff)
!= (!i) * sizeof_namespace_index(ndd)) { != (!i) * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n", dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
__func__, i, (unsigned long long) i, (unsigned long long)
__le64_to_cpu(nsindex[i]->otheroff)); __le64_to_cpu(nsindex[i]->otheroff));
continue; continue;
} }
...@@ -176,8 +187,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd) ...@@ -176,8 +187,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
size = __le64_to_cpu(nsindex[i]->mysize); size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd) if (size > sizeof_namespace_index(ndd)
|| size < sizeof(struct nd_namespace_index)) { || size < sizeof(struct nd_namespace_index)) {
dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n", dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
__func__, i, size);
continue; continue;
} }
...@@ -185,9 +195,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd) ...@@ -185,9 +195,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
if (nslot * sizeof_namespace_label(ndd) if (nslot * sizeof_namespace_label(ndd)
+ 2 * sizeof_namespace_index(ndd) + 2 * sizeof_namespace_index(ndd)
> ndd->nsarea.config_size) { > ndd->nsarea.config_size) {
dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n", dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
__func__, i, nslot, i, nslot, ndd->nsarea.config_size);
ndd->nsarea.config_size);
continue; continue;
} }
valid[i] = true; valid[i] = true;
...@@ -356,8 +365,8 @@ static bool slot_valid(struct nvdimm_drvdata *ndd, ...@@ -356,8 +365,8 @@ static bool slot_valid(struct nvdimm_drvdata *ndd,
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
nd_label->checksum = __cpu_to_le64(sum_save); nd_label->checksum = __cpu_to_le64(sum_save);
if (sum != sum_save) { if (sum != sum_save) {
dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n", dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
__func__, slot, sum); slot, sum);
return false; return false;
} }
} }
...@@ -422,8 +431,8 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd) ...@@ -422,8 +431,8 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
u64 dpa = __le64_to_cpu(nd_label->dpa); u64 dpa = __le64_to_cpu(nd_label->dpa);
dev_dbg(ndd->dev, dev_dbg(ndd->dev,
"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n", "slot%d invalid slot: %d dpa: %llx size: %llx\n",
__func__, slot, label_slot, dpa, size); slot, label_slot, dpa, size);
continue; continue;
} }
count++; count++;
...@@ -650,7 +659,7 @@ static int __pmem_label_update(struct nd_region *nd_region, ...@@ -650,7 +659,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
slot = nd_label_alloc_slot(ndd); slot = nd_label_alloc_slot(ndd);
if (slot == UINT_MAX) if (slot == UINT_MAX)
return -ENXIO; return -ENXIO;
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot); nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd)); memset(nd_label, 0, sizeof_namespace_label(ndd));
...@@ -678,7 +687,7 @@ static int __pmem_label_update(struct nd_region *nd_region, ...@@ -678,7 +687,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
nd_label->checksum = __cpu_to_le64(sum); nd_label->checksum = __cpu_to_le64(sum);
} }
nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__); nd_dbg_dpa(nd_region, ndd, res, "\n");
/* update label */ /* update label */
offset = nd_label_offset(ndd, nd_label); offset = nd_label_offset(ndd, nd_label);
...@@ -700,7 +709,7 @@ static int __pmem_label_update(struct nd_region *nd_region, ...@@ -700,7 +709,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
break; break;
} }
if (victim) { if (victim) {
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); dev_dbg(ndd->dev, "free: %d\n", slot);
slot = to_slot(ndd, victim->label); slot = to_slot(ndd, victim->label);
nd_label_free_slot(ndd, slot); nd_label_free_slot(ndd, slot);
victim->label = NULL; victim->label = NULL;
...@@ -868,7 +877,7 @@ static int __blk_label_update(struct nd_region *nd_region, ...@@ -868,7 +877,7 @@ static int __blk_label_update(struct nd_region *nd_region,
slot = nd_label_alloc_slot(ndd); slot = nd_label_alloc_slot(ndd);
if (slot == UINT_MAX) if (slot == UINT_MAX)
goto abort; goto abort;
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot); dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot); nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd)); memset(nd_label, 0, sizeof_namespace_label(ndd));
...@@ -928,7 +937,7 @@ static int __blk_label_update(struct nd_region *nd_region, ...@@ -928,7 +937,7 @@ static int __blk_label_update(struct nd_region *nd_region,
/* free up now unused slots in the new index */ /* free up now unused slots in the new index */
for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) { for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); dev_dbg(ndd->dev, "free: %d\n", slot);
nd_label_free_slot(ndd, slot); nd_label_free_slot(ndd, slot);
} }
...@@ -1092,7 +1101,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) ...@@ -1092,7 +1101,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
active--; active--;
slot = to_slot(ndd, nd_label); slot = to_slot(ndd, nd_label);
nd_label_free_slot(ndd, slot); nd_label_free_slot(ndd, slot);
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot); dev_dbg(ndd->dev, "free: %d\n", slot);
list_move_tail(&label_ent->list, &list); list_move_tail(&label_ent->list, &list);
label_ent->label = NULL; label_ent->label = NULL;
} }
...@@ -1100,7 +1109,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) ...@@ -1100,7 +1109,7 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
if (active == 0) { if (active == 0) {
nd_mapping_free_labels(nd_mapping); nd_mapping_free_labels(nd_mapping);
dev_dbg(ndd->dev, "%s: no more active labels\n", __func__); dev_dbg(ndd->dev, "no more active labels\n");
} }
mutex_unlock(&nd_mapping->lock); mutex_unlock(&nd_mapping->lock);
......
...@@ -33,7 +33,7 @@ enum { ...@@ -33,7 +33,7 @@ enum {
BTTINFO_UUID_LEN = 16, BTTINFO_UUID_LEN = 16,
BTTINFO_FLAG_ERROR = 0x1, /* error state (read-only) */ BTTINFO_FLAG_ERROR = 0x1, /* error state (read-only) */
BTTINFO_MAJOR_VERSION = 1, BTTINFO_MAJOR_VERSION = 1,
ND_LABEL_MIN_SIZE = 512 * 129, /* see sizeof_namespace_index() */ ND_LABEL_MIN_SIZE = 256 * 4, /* see sizeof_namespace_index() */
ND_LABEL_ID_SIZE = 50, ND_LABEL_ID_SIZE = 50,
ND_NSINDEX_INIT = 0x1, ND_NSINDEX_INIT = 0x1,
}; };
......
...@@ -421,7 +421,7 @@ static ssize_t alt_name_store(struct device *dev, ...@@ -421,7 +421,7 @@ static ssize_t alt_name_store(struct device *dev,
rc = __alt_name_store(dev, buf, len); rc = __alt_name_store(dev, buf, len);
if (rc >= 0) if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev); rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -1007,7 +1007,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) ...@@ -1007,7 +1007,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
if (uuid_not_set(uuid, dev, __func__)) if (uuid_not_set(uuid, dev, __func__))
return -ENXIO; return -ENXIO;
if (nd_region->ndr_mappings == 0) { if (nd_region->ndr_mappings == 0) {
dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__); dev_dbg(dev, "not associated with dimm(s)\n");
return -ENXIO; return -ENXIO;
} }
...@@ -1105,8 +1105,7 @@ static ssize_t size_store(struct device *dev, ...@@ -1105,8 +1105,7 @@ static ssize_t size_store(struct device *dev,
*uuid = NULL; *uuid = NULL;
} }
dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
? "fail" : "success", rc);
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -1270,8 +1269,8 @@ static ssize_t uuid_store(struct device *dev, ...@@ -1270,8 +1269,8 @@ static ssize_t uuid_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev); rc = nd_namespace_label_update(nd_region, dev);
else else
kfree(uuid); kfree(uuid);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -1355,9 +1354,8 @@ static ssize_t sector_size_store(struct device *dev, ...@@ -1355,9 +1354,8 @@ static ssize_t sector_size_store(struct device *dev,
rc = nd_size_select_store(dev, buf, lbasize, supported); rc = nd_size_select_store(dev, buf, lbasize, supported);
if (rc >= 0) if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev); rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__, dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
rc, rc < 0 ? "tried" : "wrote", buf, buf, buf[len - 1] == '\n' ? "" : "\n");
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -1519,7 +1517,7 @@ static ssize_t holder_class_store(struct device *dev, ...@@ -1519,7 +1517,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = __holder_class_store(dev, buf); rc = __holder_class_store(dev, buf);
if (rc >= 0) if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev); rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -1717,8 +1715,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) ...@@ -1717,8 +1715,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__)) if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
if (!nsblk->lbasize) { if (!nsblk->lbasize) {
dev_dbg(&ndns->dev, "%s: sector size not set\n", dev_dbg(&ndns->dev, "sector size not set\n");
__func__);
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
} }
if (!nd_namespace_blk_validate(nsblk)) if (!nd_namespace_blk_validate(nsblk))
...@@ -1798,9 +1795,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, ...@@ -1798,9 +1795,7 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
} }
if (found_uuid) { if (found_uuid) {
dev_dbg(ndd->dev, dev_dbg(ndd->dev, "duplicate entry for uuid\n");
"%s duplicate entry for uuid\n",
__func__);
return false; return false;
} }
found_uuid = true; found_uuid = true;
...@@ -1926,7 +1921,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, ...@@ -1926,7 +1921,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
} }
if (i < nd_region->ndr_mappings) { if (i < nd_region->ndr_mappings) {
struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
/* /*
* Give up if we don't find an instance of a uuid at each * Give up if we don't find an instance of a uuid at each
...@@ -1934,7 +1929,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, ...@@ -1934,7 +1929,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
* find a dimm with two instances of the same uuid. * find a dimm with two instances of the same uuid.
*/ */
dev_err(&nd_region->dev, "%s missing label for %pUb\n", dev_err(&nd_region->dev, "%s missing label for %pUb\n",
dev_name(ndd->dev), nd_label->uuid); nvdimm_name(nvdimm), nd_label->uuid);
rc = -EINVAL; rc = -EINVAL;
goto err; goto err;
} }
...@@ -1994,14 +1989,13 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, ...@@ -1994,14 +1989,13 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
namespace_pmem_release(dev); namespace_pmem_release(dev);
switch (rc) { switch (rc) {
case -EINVAL: case -EINVAL:
dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__); dev_dbg(&nd_region->dev, "invalid label(s)\n");
break; break;
case -ENODEV: case -ENODEV:
dev_dbg(&nd_region->dev, "%s: label not found\n", __func__); dev_dbg(&nd_region->dev, "label not found\n");
break; break;
default: default:
dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n", dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
__func__, rc);
break; break;
} }
return ERR_PTR(rc); return ERR_PTR(rc);
...@@ -2334,8 +2328,8 @@ static struct device **scan_labels(struct nd_region *nd_region) ...@@ -2334,8 +2328,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
} }
dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n", dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
__func__, count, is_nd_blk(&nd_region->dev) count, is_nd_blk(&nd_region->dev)
? "blk" : "pmem", count == 1 ? "" : "s"); ? "blk" : "pmem", count == 1 ? "" : "s");
if (count == 0) { if (count == 0) {
...@@ -2467,7 +2461,7 @@ static int init_active_labels(struct nd_region *nd_region) ...@@ -2467,7 +2461,7 @@ static int init_active_labels(struct nd_region *nd_region)
get_ndd(ndd); get_ndd(ndd);
count = nd_label_active_count(ndd); count = nd_label_active_count(ndd);
dev_dbg(ndd->dev, "%s: %d\n", __func__, count); dev_dbg(ndd->dev, "count: %d\n", count);
if (!count) if (!count)
continue; continue;
for (j = 0; j < count; j++) { for (j = 0; j < count; j++) {
......
...@@ -341,7 +341,6 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region) ...@@ -341,7 +341,6 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
} }
#endif #endif
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region); int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err); int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
......
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "of_pmem: " fmt

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>

static const struct attribute_group *region_attr_groups[] = {
	&nd_region_attribute_group,
	&nd_device_attribute_group,
	NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
	&nvdimm_bus_attribute_group,
	NULL,
};

struct of_pmem_private {
	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
};

static int of_pmem_region_probe(struct platform_device *pdev)
{
	struct of_pmem_private *priv;
	struct device_node *np;
	struct nvdimm_bus *bus;
	bool is_volatile;
	int i;

	np = dev_of_node(&pdev->dev);
	if (!np)
		return -ENXIO;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus_desc.attr_groups = bus_attr_groups;
	priv->bus_desc.provider_name = "of_pmem";
	priv->bus_desc.module = THIS_MODULE;
	priv->bus_desc.of_node = np;

	priv->bus = bus = nvdimm_bus_register(&pdev->dev, &priv->bus_desc);
	if (!bus) {
		kfree(priv);
		return -ENODEV;
	}
	platform_set_drvdata(pdev, priv);

	is_volatile = !!of_find_property(np, "volatile", NULL);
	dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
			is_volatile ? "volatile" : "non-volatile", np);

	for (i = 0; i < pdev->num_resources; i++) {
		struct nd_region_desc ndr_desc;
		struct nd_region *region;

		/*
		 * NB: libnvdimm copies the data from ndr_desc into its own
		 * structures so passing a stack pointer is fine.
		 */
		memset(&ndr_desc, 0, sizeof(ndr_desc));
		ndr_desc.attr_groups = region_attr_groups;
		ndr_desc.numa_node = of_node_to_nid(np);
		ndr_desc.res = &pdev->resource[i];
		ndr_desc.of_node = np;
		set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

		if (is_volatile)
			region = nvdimm_volatile_region_create(bus, &ndr_desc);
		else
			region = nvdimm_pmem_region_create(bus, &ndr_desc);

		if (!region)
			dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
					ndr_desc.res, np);
		else
			dev_dbg(&pdev->dev, "Registered region %pR from %pOF\n",
					ndr_desc.res, np);
	}

	return 0;
}

static int of_pmem_region_remove(struct platform_device *pdev)
{
	struct of_pmem_private *priv = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(priv->bus);
	kfree(priv);

	return 0;
}

static const struct of_device_id of_pmem_region_match[] = {
	{ .compatible = "pmem-region" },
	{ },
};

static struct platform_driver of_pmem_region_driver = {
	.probe = of_pmem_region_probe,
	.remove = of_pmem_region_remove,
	.driver = {
		.name = "of_pmem",
		.owner = THIS_MODULE,
		.of_match_table = of_pmem_region_match,
	},
};
module_platform_driver(of_pmem_region_driver);

MODULE_DEVICE_TABLE(of, of_pmem_region_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
...@@ -27,7 +27,7 @@ static void nd_pfn_release(struct device *dev) ...@@ -27,7 +27,7 @@ static void nd_pfn_release(struct device *dev)
struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_pfn *nd_pfn = to_nd_pfn(dev); struct nd_pfn *nd_pfn = to_nd_pfn(dev);
dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns); nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id); ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
kfree(nd_pfn->uuid); kfree(nd_pfn->uuid);
...@@ -94,8 +94,8 @@ static ssize_t mode_store(struct device *dev, ...@@ -94,8 +94,8 @@ static ssize_t mode_store(struct device *dev,
else else
rc = -EINVAL; rc = -EINVAL;
} }
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -144,8 +144,8 @@ static ssize_t align_store(struct device *dev, ...@@ -144,8 +144,8 @@ static ssize_t align_store(struct device *dev,
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align, rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments()); nd_pfn_supported_alignments());
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -171,8 +171,8 @@ static ssize_t uuid_store(struct device *dev, ...@@ -171,8 +171,8 @@ static ssize_t uuid_store(struct device *dev,
device_lock(dev); device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
device_unlock(dev); device_unlock(dev);
return rc ? rc : len; return rc ? rc : len;
...@@ -201,8 +201,8 @@ static ssize_t namespace_store(struct device *dev, ...@@ -201,8 +201,8 @@ static ssize_t namespace_store(struct device *dev,
device_lock(dev); device_lock(dev);
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
rc, buf, buf[len - 1] == '\n' ? "" : "\n"); buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
device_unlock(dev); device_unlock(dev);
...@@ -314,8 +314,8 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, ...@@ -314,8 +314,8 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
dev = &nd_pfn->dev; dev = &nd_pfn->dev;
device_initialize(&nd_pfn->dev); device_initialize(&nd_pfn->dev);
if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
__func__, dev_name(ndns->claim)); dev_name(ndns->claim));
put_device(dev); put_device(dev);
return NULL; return NULL;
} }
...@@ -510,8 +510,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -510,8 +510,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nd_pfn = to_nd_pfn(pfn_dev); nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb; nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG); rc = nd_pfn_validate(nd_pfn, PFN_SIG);
dev_dbg(dev, "%s: pfn: %s\n", __func__, dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) { if (rc < 0) {
nd_detach_ndns(pfn_dev, &nd_pfn->ndns); nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev); put_device(pfn_dev);
......
...@@ -66,7 +66,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem, ...@@ -66,7 +66,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
rc = BLK_STS_IOERR; rc = BLK_STS_IOERR;
if (cleared > 0 && cleared / 512) { if (cleared > 0 && cleared / 512) {
cleared /= 512; cleared /= 512;
dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, dev_dbg(dev, "%#llx clear %ld sector%s\n",
(unsigned long long) sector, cleared, (unsigned long long) sector, cleared,
cleared > 1 ? "s" : ""); cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared); badblocks_clear(&pmem->bb, sector, cleared);
...@@ -547,17 +547,7 @@ static struct nd_device_driver nd_pmem_driver = { ...@@ -547,17 +547,7 @@ static struct nd_device_driver nd_pmem_driver = {
.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM, .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
}; };
static int __init pmem_init(void) module_nd_driver(nd_pmem_driver);
{
return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);
static void pmem_exit(void)
{
driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>"); MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
...@@ -27,10 +27,10 @@ static int nd_region_probe(struct device *dev) ...@@ -27,10 +27,10 @@ static int nd_region_probe(struct device *dev)
if (nd_region->num_lanes > num_online_cpus() if (nd_region->num_lanes > num_online_cpus()
&& nd_region->num_lanes < num_possible_cpus() && nd_region->num_lanes < num_possible_cpus()
&& !test_and_set_bit(0, &once)) { && !test_and_set_bit(0, &once)) {
dev_info(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n", dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
num_online_cpus(), nd_region->num_lanes, num_online_cpus(), nd_region->num_lanes,
num_possible_cpus()); num_possible_cpus());
dev_info(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n", dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
nd_region->num_lanes); nd_region->num_lanes);
} }
......
...@@ -182,6 +182,14 @@ struct nd_region *to_nd_region(struct device *dev) ...@@ -182,6 +182,14 @@ struct nd_region *to_nd_region(struct device *dev)
} }
EXPORT_SYMBOL_GPL(to_nd_region); EXPORT_SYMBOL_GPL(to_nd_region);
struct device *nd_region_dev(struct nd_region *nd_region)
{
if (!nd_region)
return NULL;
return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev) struct nd_blk_region *to_nd_blk_region(struct device *dev)
{ {
struct nd_region *nd_region = to_nd_region(dev); struct nd_region *nd_region = to_nd_region(dev);
...@@ -1014,6 +1022,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, ...@@ -1014,6 +1022,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
dev->parent = &nvdimm_bus->dev; dev->parent = &nvdimm_bus->dev;
dev->type = dev_type; dev->type = dev_type;
dev->groups = ndr_desc->attr_groups; dev->groups = ndr_desc->attr_groups;
dev->of_node = ndr_desc->of_node;
nd_region->ndr_size = resource_size(ndr_desc->res); nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region->ndr_start = ndr_desc->res->start; nd_region->ndr_start = ndr_desc->res->start;
nd_device_register(dev); nd_device_register(dev);
......
...@@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, ...@@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf, struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc); unsigned int buf_len, int *cmd_rc);
struct device_node;
struct nvdimm_bus_descriptor { struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups; const struct attribute_group **attr_groups;
unsigned long bus_dsm_mask; unsigned long bus_dsm_mask;
unsigned long cmd_mask; unsigned long cmd_mask;
struct module *module; struct module *module;
char *provider_name; char *provider_name;
struct device_node *of_node;
ndctl_fn ndctl; ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
...@@ -123,6 +125,7 @@ struct nd_region_desc { ...@@ -123,6 +125,7 @@ struct nd_region_desc {
int num_lanes; int num_lanes;
int numa_node; int numa_node;
unsigned long flags; unsigned long flags;
struct device_node *of_node;
}; };
struct device; struct device;
...@@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); ...@@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev); struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm *to_nvdimm(struct device *dev); struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev); struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
......
...@@ -180,6 +180,12 @@ struct nd_region; ...@@ -180,6 +180,12 @@ struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
struct module *module, const char *mod_name); struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \ #define nd_driver_register(driver) \
__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
module_driver(driver, nd_driver_register, nd_driver_unregister)
#endif /* __LINUX_ND_H__ */ #endif /* __LINUX_ND_H__ */
This diff is collapsed.
...@@ -93,6 +93,7 @@ struct nd_cmd_ars_err_inj_stat { ...@@ -93,6 +93,7 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_FW_FINISH_UPDATE 15 #define ND_INTEL_FW_FINISH_UPDATE 15
#define ND_INTEL_FW_FINISH_QUERY 16 #define ND_INTEL_FW_FINISH_QUERY 16
#define ND_INTEL_SMART_SET_THRESHOLD 17 #define ND_INTEL_SMART_SET_THRESHOLD 17
#define ND_INTEL_SMART_INJECT 18
#define ND_INTEL_SMART_HEALTH_VALID (1 << 0) #define ND_INTEL_SMART_HEALTH_VALID (1 << 0)
#define ND_INTEL_SMART_SPARES_VALID (1 << 1) #define ND_INTEL_SMART_SPARES_VALID (1 << 1)
...@@ -111,6 +112,10 @@ struct nd_cmd_ars_err_inj_stat { ...@@ -111,6 +112,10 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0) #define ND_INTEL_SMART_NON_CRITICAL_HEALTH (1 << 0)
#define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1) #define ND_INTEL_SMART_CRITICAL_HEALTH (1 << 1)
#define ND_INTEL_SMART_FATAL_HEALTH (1 << 2) #define ND_INTEL_SMART_FATAL_HEALTH (1 << 2)
#define ND_INTEL_SMART_INJECT_MTEMP (1 << 0)
#define ND_INTEL_SMART_INJECT_SPARE (1 << 1)
#define ND_INTEL_SMART_INJECT_FATAL (1 << 2)
#define ND_INTEL_SMART_INJECT_SHUTDOWN (1 << 3)
struct nd_intel_smart { struct nd_intel_smart {
__u32 status; __u32 status;
...@@ -158,6 +163,17 @@ struct nd_intel_smart_set_threshold { ...@@ -158,6 +163,17 @@ struct nd_intel_smart_set_threshold {
__u32 status; __u32 status;
} __packed; } __packed;
struct nd_intel_smart_inject {
__u64 flags;
__u8 mtemp_enable;
__u16 media_temperature;
__u8 spare_enable;
__u8 spares;
__u8 fatal_enable;
__u8 unsafe_shutdown_enable;
__u32 status;
} __packed;
#define INTEL_FW_STORAGE_SIZE 0x100000 #define INTEL_FW_STORAGE_SIZE 0x100000
#define INTEL_FW_MAX_SEND_LEN 0xFFEC #define INTEL_FW_MAX_SEND_LEN 0xFFEC
#define INTEL_FW_QUERY_INTERVAL 250000 #define INTEL_FW_QUERY_INTERVAL 250000
......