Commit 8401c72c authored by Linus Torvalds

Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm fixes from Dan Williams:
 "Two regression fixes, two bug fixes for older issues, two fixes for
  new functionality added this cycle that have userspace ABI concerns,
  and a small cleanup. These have appeared in a linux-next release and
  have a build success report from the 0day robot.

   * The 4.16 rework of altmap handling led to some configurations
     leaking page table allocations due to freeing from the altmap
     reservation rather than the page allocator.

     The impact without the fix is leaked memory and a WARN() message
     when tearing down libnvdimm namespaces. The rework also missed a
     place where error handling code needed to be removed; leaving it in
     can lead to a crash if devm_memremap_pages() fails.

   * acpi_map_pxm_to_node() had a latent bug whereby it could
     misidentify the closest online node to a given proximity domain.

   * Block integrity handling was reworked several kernels back to allow
     calling add_disk() after setting up the integrity profile.

     The nd_btt and nd_blk drivers are just now catching up to fix
     automatic partition detection at driver load time.

   * The new persistence_domain attribute, a platform indicator of
     whether CPU caches are power-fail protected (for example), is meant
     to be a single-value enum and not a set of flags.

     This oversight was caught while reviewing new userspace code in
     libndctl to communicate the attribute.

     Fix this new enabling up so that we are not stuck with an unwanted
     userspace ABI"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm, nfit: fix persistence domain reporting
  libnvdimm, region: hide persistence_domain when unknown
  acpi, numa: fix pxm to online numa node associations
  x86, memremap: fix altmap accounting at free
  libnvdimm: remove redundant assignment to pointer 'dev'
  libnvdimm, {btt, blk}: do integrity setup before add_disk()
  kernel/memremap: Remove stale devres_free() call
parents 9ec7ccc8 fe9a552e
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 
 #define PAGE_INUSE 0xFD
 
-static void __meminit free_pagetable(struct page *page, int order,
-                struct vmem_altmap *altmap)
+static void __meminit free_pagetable(struct page *page, int order)
 {
         unsigned long magic;
         unsigned int nr_pages = 1 << order;
 
-        if (altmap) {
-                vmem_altmap_free(altmap, nr_pages);
-                return;
-        }
-
         /* bootmem page has reserved flag */
         if (PageReserved(page)) {
                 __ClearPageReserved(page);
@@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
         free_pages((unsigned long)page_address(page), order);
 }
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
-                struct vmem_altmap *altmap)
+static void __meminit free_hugepage_table(struct page *page,
+                struct vmem_altmap *altmap)
+{
+        if (altmap)
+                vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
+        else
+                free_pagetable(page, get_order(PMD_SIZE));
+}
+
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
         pte_t *pte;
         int i;
@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
         }
 
         /* free a pte talbe */
-        free_pagetable(pmd_page(*pmd), 0, altmap);
+        free_pagetable(pmd_page(*pmd), 0);
         spin_lock(&init_mm.page_table_lock);
         pmd_clear(pmd);
         spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
-                struct vmem_altmap *altmap)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 {
         pmd_t *pmd;
         int i;
@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
         }
 
         /* free a pmd talbe */
-        free_pagetable(pud_page(*pud), 0, altmap);
+        free_pagetable(pud_page(*pud), 0);
         spin_lock(&init_mm.page_table_lock);
         pud_clear(pud);
         spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
-                struct vmem_altmap *altmap)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 {
         pud_t *pud;
         int i;
@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
         }
 
         /* free a pud talbe */
-        free_pagetable(p4d_page(*p4d), 0, altmap);
+        free_pagetable(p4d_page(*p4d), 0);
         spin_lock(&init_mm.page_table_lock);
         p4d_clear(p4d);
         spin_unlock(&init_mm.page_table_lock);
@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-                 struct vmem_altmap *altmap, bool direct)
+                 bool direct)
 {
         unsigned long next, pages = 0;
         pte_t *pte;
@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                          * freed when offlining, or simplely not in use.
                          */
                         if (!direct)
-                                free_pagetable(pte_page(*pte), 0, altmap);
+                                free_pagetable(pte_page(*pte), 0);
 
                         spin_lock(&init_mm.page_table_lock);
                         pte_clear(&init_mm, addr, pte);
@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
                         page_addr = page_address(pte_page(*pte));
                         if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-                                free_pagetable(pte_page(*pte), 0, altmap);
+                                free_pagetable(pte_page(*pte), 0);
 
                                 spin_lock(&init_mm.page_table_lock);
                                 pte_clear(&init_mm, addr, pte);
@@ -974,8 +974,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                         if (IS_ALIGNED(addr, PMD_SIZE) &&
                             IS_ALIGNED(next, PMD_SIZE)) {
                                 if (!direct)
-                                        free_pagetable(pmd_page(*pmd),
-                                                       get_order(PMD_SIZE),
-                                                       altmap);
+                                        free_hugepage_table(pmd_page(*pmd),
+                                                            altmap);
 
                                 spin_lock(&init_mm.page_table_lock);
@@ -989,8 +988,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                                 page_addr = page_address(pmd_page(*pmd));
                                 if (!memchr_inv(page_addr, PAGE_INUSE,
                                                 PMD_SIZE)) {
-                                        free_pagetable(pmd_page(*pmd),
-                                                       get_order(PMD_SIZE),
-                                                       altmap);
+                                        free_hugepage_table(pmd_page(*pmd),
+                                                            altmap);
 
                                         spin_lock(&init_mm.page_table_lock);
@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 }
 
                 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-                remove_pte_table(pte_base, addr, next, altmap, direct);
-                free_pte_table(pte_base, pmd, altmap);
+                remove_pte_table(pte_base, addr, next, direct);
+                free_pte_table(pte_base, pmd);
         }
 
         /* Call free_pmd_table() in remove_pud_table(). */
@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                             IS_ALIGNED(next, PUD_SIZE)) {
                                 if (!direct)
                                         free_pagetable(pud_page(*pud),
-                                                       get_order(PUD_SIZE),
-                                                       altmap);
+                                                       get_order(PUD_SIZE));
 
                                 spin_lock(&init_mm.page_table_lock);
                                 pud_clear(pud);
@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                                 if (!memchr_inv(page_addr, PAGE_INUSE,
                                                 PUD_SIZE)) {
                                         free_pagetable(pud_page(*pud),
-                                                       get_order(PUD_SIZE),
-                                                       altmap);
+                                                       get_order(PUD_SIZE));
 
                                         spin_lock(&init_mm.page_table_lock);
                                         pud_clear(pud);
@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 
                 pmd_base = pmd_offset(pud, 0);
                 remove_pmd_table(pmd_base, addr, next, direct, altmap);
-                free_pmd_table(pmd_base, pud, altmap);
+                free_pmd_table(pmd_base, pud);
         }
 
         if (direct)
@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                  * to adapt for boot-time switching between 4 and 5 level page tables.
                  */
                 if (CONFIG_PGTABLE_LEVELS == 5)
-                        free_pud_table(pud_base, p4d, altmap);
+                        free_pud_table(pud_base, p4d);
         }
 
         if (direct)
......
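
The x86 hunks above implement the altmap accounting fix described in the pull message: page-table pages always come from the regular page allocator, so debiting them from the altmap reservation both leaked the pages and skewed the altmap accounting, producing the WARN() at namespace teardown. Only the PMD-sized data pages, which may genuinely have been carved out of the altmap reservation, are still routed through it. The new helper from the diff, condensed here for reference:

    /*
     * Condensed from the hunks above: only the PMD-sized data pages may
     * have been allocated from the altmap reservation, so only they are
     * handed back via vmem_altmap_free(); page-table pages always return
     * to the page allocator, and free_pagetable() no longer takes an
     * altmap argument at all.
     */
    static void __meminit free_hugepage_table(struct page *page,
                    struct vmem_altmap *altmap)
    {
            if (altmap)
                    vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
            else
                    free_pagetable(page, get_order(PMD_SIZE));
    }
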
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
         else
                 ndr_desc->numa_node = NUMA_NO_NODE;
 
-        if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+        /*
+         * Persistence domain bits are hierarchical, if
+         * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+         * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
+         */
+        if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
-
-        if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+        else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
 
         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
......
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
  */
 int acpi_map_pxm_to_online_node(int pxm)
 {
-        int node, n, dist, min_dist;
+        int node, min_node;
 
         node = acpi_map_pxm_to_node(pxm);
         if (node == NUMA_NO_NODE)
                 node = 0;
 
+        min_node = node;
         if (!node_online(node)) {
-                min_dist = INT_MAX;
+                int min_dist = INT_MAX, dist, n;
+
                 for_each_online_node(n) {
                         dist = node_distance(node, n);
                         if (dist < min_dist) {
                                 min_dist = dist;
-                                node = n;
+                                min_node = n;
                         }
                 }
         }
 
-        return node;
+        return min_node;
 }
 EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
......
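
The latent bug fixed in acpi_map_pxm_to_online_node() is visible in the removed lines above: the search reused node both as the reference for node_distance() and as the running best candidate, so once a candidate was recorded, later distances were measured from that candidate rather than from the node the proximity domain originally mapped to, and the function could return a node that is not actually the closest online one. The fix keeps the reference node fixed and tracks the winner in min_node. A standalone sketch of the corrected search (a hypothetical helper, not part of the patch, using the same kernel NUMA primitives):

    /*
     * Hypothetical helper: return @node if it is online, otherwise the
     * online node with the smallest node_distance() from it.  Note that
     * the reference node in the comparison never changes; only the best
     * candidate does.
     */
    static int nearest_online_node(int node)
    {
            int n, dist, min_dist = INT_MAX, min_node = node;

            if (node_online(node))
                    return node;

            for_each_online_node(n) {
                    dist = node_distance(node, n);
                    if (dist < min_dist) {
                            min_dist = dist;
                            min_node = n;
                    }
            }

            return min_node;
    }
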
@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
         disk->queue = q;
         disk->flags = GENHD_FL_EXT_DEVT;
         nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
-        set_capacity(disk, 0);
-        device_add_disk(dev, disk);
 
         if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
                 return -ENOMEM;
@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
         }
 
         set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
+        device_add_disk(dev, disk);
         revalidate_disk(disk);
         return 0;
 }
......
@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt)
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
         btt->btt_queue->queuedata = btt;
 
-        set_capacity(btt->btt_disk, 0);
-        device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
         if (btt_meta_size(btt)) {
                 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
 
@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt)
                 }
         }
         set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+        device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
         btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
         revalidate_disk(btt->btt_disk);
......
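
The blk and btt hunks above apply the same reordering: the disks used to be added with a capacity of zero, before the integrity profile and the real size were in place, so the partition scan that device_add_disk() triggers could not detect partitions at driver load time. With the block layer allowing integrity setup before add_disk() (the rework mentioned in the pull message), the drivers now register integrity, set the final capacity, and only then add the disk. Schematically, using the names from the btt hunk with error handling trimmed:

    /* Final ordering, condensed from the btt hunks above: integrity and
     * capacity are established before the disk becomes visible to the
     * partition scanner. */
    if (btt_meta_size(btt)) {
            int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

            if (rc)
                    return rc;
    }
    set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
    device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
    btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
    revalidate_disk(btt->btt_disk);
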
@@ -304,7 +304,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
 struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                 struct nd_namespace_common *ndns)
 {
-        struct device *dev = &nd_pfn->dev;
+        struct device *dev;
 
         if (!nd_pfn)
                 return NULL;
......
@@ -532,11 +532,13 @@ static ssize_t persistence_domain_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
         struct nd_region *nd_region = to_nd_region(dev);
-        unsigned long flags = nd_region->flags;
 
-        return sprintf(buf, "%s%s\n",
-                        flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "",
-                        flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : "");
+        if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
+                return sprintf(buf, "cpu_cache\n");
+        else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
+                return sprintf(buf, "memory_controller\n");
+        else
+                return sprintf(buf, "\n");
 }
 
 static DEVICE_ATTR_RO(persistence_domain);
@@ -593,6 +595,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
                         return 0;
         }
 
+        if (a == &dev_attr_persistence_domain.attr) {
+                if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
+                                        | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
+                        return 0;
+                return a->mode;
+        }
+
         if (a != &dev_attr_set_cookie.attr
                         && a != &dev_attr_available_size.attr)
                 return a->mode;
......
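
Together with the nfit change earlier in this merge, persistence_domain now reports exactly one value ("cpu_cache", "memory_controller", or a blank line) and the attribute is hidden entirely when neither persistence flag is set, so userspace such as libndctl can treat it as an enum instead of parsing a space-separated flag list. A hypothetical userspace reader, not part of this series and with an illustrative sysfs path, shows how the single-value ABI is consumed:

    #include <stdio.h>
    #include <string.h>

    enum persistence_domain { PD_NONE, PD_MEMORY_CONTROLLER, PD_CPU_CACHE };

    /* Read the single-value attribute for one region. */
    static enum persistence_domain read_persistence_domain(const char *path)
    {
            char buf[64] = "";
            FILE *f = fopen(path, "r");

            if (!f)
                    return PD_NONE;
            if (!fgets(buf, sizeof(buf), f))
                    buf[0] = '\0';
            fclose(f);

            if (!strncmp(buf, "cpu_cache", strlen("cpu_cache")))
                    return PD_CPU_CACHE;
            if (!strncmp(buf, "memory_controller", strlen("memory_controller")))
                    return PD_MEMORY_CONTROLLER;
            return PD_NONE;
    }

    /* e.g. read_persistence_domain("/sys/bus/nd/devices/region0/persistence_domain") */
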
@@ -427,7 +427,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
  err_radix:
         pgmap_radix_release(res, pgoff);
-        devres_free(pgmap);
         return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
......
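
The one-line removal above is the "stale devres_free() call" fix from the shortlog. devres_free() is only meant for a resource obtained from devres_alloc() that has not yet been handed to devres_add(); after this cycle's rework the dev_pagemap is supplied by the caller of devm_memremap_pages() (see the signature in the hunk header), so it is not this function's devres allocation to free, and the pull message notes the stale call can crash the error path. A minimal sketch of the intended pairing, with a hypothetical do_setup() standing in for real setup work:

    /* Sketch of the devres pairing rule: devres_free() and devres_add()
     * only pair with a pointer returned by devres_alloc(). */
    struct my_res {
            int dummy;
    };

    static void my_release(struct device *dev, void *res)
    {
            /* undo whatever the resource represents */
    }

    static int do_setup(struct device *dev)
    {
            return 0;       /* hypothetical setup step */
    }

    static int example_probe(struct device *dev)
    {
            struct my_res *p = devres_alloc(my_release, sizeof(*p), GFP_KERNEL);

            if (!p)
                    return -ENOMEM;

            if (do_setup(dev) < 0) {
                    devres_free(p);         /* ok: p came from devres_alloc() */
                    return -ENXIO;
            }

            devres_add(dev, p);             /* released when the device is unbound */
            return 0;
    }
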