Commit d2c0f041 authored by Dan Williams, committed by Linus Torvalds

libnvdimm, pfn, pmem: allocate memmap array in persistent memory

Use the new vmem_altmap capability to enable the pmem driver to arrange
for a struct page memmap to be established in persistent memory.

[linux@roeck-us.net: mn10300: declare __pfn_to_phys() to fix build error]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4b94ffdc
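
For readers new to vmem_altmap, the sketch below (illustrative only, not part of this commit) shows the shape of the mechanism this patch builds on: a driver fills in a struct vmem_altmap describing a reservation at the start of its device range, and devm_memremap_pages() then carves the struct page array out of that device memory instead of allocating it from regular RAM. The helper name is made up; the 8K info-block reservation and the field usage mirror the pmem hunks below, and the three-argument devm_memremap_pages() call matches its signature at this point in the series (later kernels take additional arguments and declare these interfaces in different headers).

/* Illustrative sketch only -- not part of this commit. */
#include <linux/device.h>
#include <linux/io.h>		/* devm_memremap_pages(), as called here */
#include <linux/memremap.h>	/* struct vmem_altmap */
#include <linux/sizes.h>	/* SZ_8K */

static void *example_map_with_altmap(struct device *dev, struct resource *res,
		resource_size_t data_offset)
{
	/*
	 * Reserve the first 8K of the range (the info block) and offer
	 * the pages between that reservation and the start of the data
	 * area as backing store for the memmap.
	 */
	struct vmem_altmap altmap = {
		.base_pfn = __phys_to_pfn(res->start),
		.reserve = __phys_to_pfn(SZ_8K),
		.free = __phys_to_pfn(data_offset - SZ_8K),
		.alloc = 0,
	};

	/* struct pages for this range are allocated from the 'free' pfns */
	return devm_memremap_pages(dev, res, &altmap);
}
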
@@ -105,6 +105,7 @@ extern struct page *vmem_map;
 #ifdef CONFIG_DISCONTIGMEM
 # define page_to_pfn(page)	((unsigned long) (page - vmem_map))
 # define pfn_to_page(pfn)	(vmem_map + (pfn))
+# define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
 #else
 # include <asm-generic/memory_model.h>
 #endif
@@ -107,6 +107,7 @@ static inline int get_order(unsigned long size)
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define pfn_to_page(pfn)	(mem_map + ((pfn) - __pfn_disp))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map) + __pfn_disp)
+#define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
 
 #define pfn_valid(pfn)					\
 ({							\
@@ -83,8 +83,7 @@ static ssize_t mode_store(struct device *dev,
 
 		if (strncmp(buf, "pmem\n", n) == 0
 				|| strncmp(buf, "pmem", n) == 0) {
-			/* TODO: allocate from PMEM support */
-			rc = -ENOTTY;
+			nd_pfn->mode = PFN_MODE_PMEM;
 		} else if (strncmp(buf, "ram\n", n) == 0
 				|| strncmp(buf, "ram", n) == 0)
 			nd_pfn->mode = PFN_MODE_RAM;
@@ -357,12 +357,16 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
 	struct device *dev = &nd_pfn->dev;
-	struct vmem_altmap *altmap;
 	struct nd_region *nd_region;
+	struct vmem_altmap *altmap;
 	struct nd_pfn_sb *pfn_sb;
 	struct pmem_device *pmem;
 	phys_addr_t offset;
 	int rc;
+	struct vmem_altmap __altmap = {
+		.base_pfn = __phys_to_pfn(nsio->res.start),
+		.reserve = __phys_to_pfn(SZ_8K),
+	};
 
 	if (!nd_pfn->uuid || !nd_pfn->ndns)
 		return -ENODEV;
@@ -380,6 +384,17 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
+			/ PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		altmap = & __altmap;
+		altmap->free = __phys_to_pfn(offset - SZ_8K);
+		altmap->alloc = 0;
 	} else {
 		rc = -ENXIO;
 		goto err;
@@ -389,7 +404,7 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	pmem = dev_get_drvdata(dev);
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
 	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
-			NULL);
+			altmap);
 	pmem->pfn_flags |= PFN_MAP;
 	if (IS_ERR(pmem->virt_addr)) {
 		rc = PTR_ERR(pmem->virt_addr);
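
To put rough numbers on the accounting above (hypothetical figures, not from the commit): with 4K pages and a 64-byte struct page, the memmap costs 64 bytes per page of namespace, so a 16G namespace needs on the order of 256M of memmap. The info block occupies the first 8K (altmap->reserve = __phys_to_pfn(SZ_8K) = 2 pfns), the pages between that reservation and the data offset are offered as altmap->free = __phys_to_pfn(offset - SZ_8K), and altmap->alloc tracks how many of those pfns vmemmap_populate() has consumed so far.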