Commit eb7d78c9 authored by Dan Williams

devm_memremap_pages: fix vmem_altmap lifetime + alignment handling

to_vmem_altmap() needs to return valid results until
arch_remove_memory() completes.  It also needs to be valid for any pfn
in a section regardless of whether that pfn maps to data.  This escaped
detection due to a bug in the unit test.
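
For illustration, every pfn in a sparsemem section shares a single radix key,
so the registered entry has to answer for the whole section, data-backed or
not.  A minimal userspace sketch of that keying (made-up addresses; assumes
x86_64's 128MB sections, PA_SECTION_SHIFT == 27, and the
phys >> PA_SECTION_SHIFT keying used by find_dev_pagemap()):

 /* sketch only, not kernel code */
 #include <stdio.h>
 #include <stdint.h>

 #define PA_SECTION_SHIFT 27	/* assumption: x86_64, 128MB sections */

 int main(void)
 {
 	/* hypothetical physical addresses within one section */
 	uint64_t phys_data   = 0x108200000ULL;	/* backed by pmem data   */
 	uint64_t phys_nodata = 0x10fe00000ULL;	/* same section, no data */

 	/* both collapse to the same radix key -> same vmem_altmap */
 	printf("keys: %llu %llu\n",
 	       (unsigned long long)(phys_data >> PA_SECTION_SHIFT),
 	       (unsigned long long)(phys_nodata >> PA_SECTION_SHIFT));
 	return 0;
 }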

The signature of this bug is that free_pagetable() fails to retrieve a
vmem_altmap and goes off into the weeds:

 BUG: unable to handle kernel NULL pointer dereference at           (null)
 IP: [<ffffffff811d2629>] get_pfnblock_flags_mask+0x49/0x60
 [..]
 Call Trace:
  [<ffffffff811d3477>] free_hot_cold_page+0x97/0x1d0
  [<ffffffff811d367a>] __free_pages+0x2a/0x40
  [<ffffffff8191e669>] free_pagetable+0x8c/0xd4
  [<ffffffff8191ef4e>] remove_pagetable+0x37a/0x808
  [<ffffffff8191b210>] vmemmap_free+0x10/0x20
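
The fix below therefore keeps the radix entries registered until
arch_remove_memory() has finished, and makes the radix-key walks use the same
section-aligned bounds that arch_add_memory()/arch_remove_memory() already
use.  The rounding it applies, recast as a small userspace sketch with
made-up numbers (assumes 128MB sections, i.e. SECTION_SIZE_BITS == 27):

 /* sketch only: the same rounding the patch applies, with example values */
 #include <stdio.h>
 #include <stdint.h>

 #define SECTION_SIZE	(1ULL << 27)	/* assumption: 128MB sections */
 #define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

 int main(void)
 {
 	uint64_t start = 0x10a000000ULL;	/* hypothetical, unaligned start */
 	uint64_t size  = 0x1e000000ULL;		/* hypothetical 480MB resource   */

 	uint64_t align_start = start & ~(SECTION_SIZE - 1);
 	uint64_t align_size  = ALIGN(size, SECTION_SIZE);
 	uint64_t align_end   = align_start + align_size - 1;

 	/* prints 0x108000000 .. 0x127ffffff, i.e. four full 128MB sections */
 	printf("%#llx .. %#llx\n", (unsigned long long)align_start,
 	       (unsigned long long)align_end);
 	return 0;
 }
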

Fixes: 4b94ffdc ("x86, mm: introduce vmem_altmap to augment vmemmap_populate()")
Cc: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 45eb570a
@@ -183,7 +183,11 @@ EXPORT_SYMBOL(put_zone_device_page);
 
 static void pgmap_radix_release(struct resource *res)
 {
-	resource_size_t key;
+	resource_size_t key, align_start, align_size, align_end;
+
+	align_start = res->start & ~(SECTION_SIZE - 1);
+	align_size = ALIGN(resource_size(res), SECTION_SIZE);
+	align_end = align_start + align_size - 1;
 
 	mutex_lock(&pgmap_lock);
-	for (key = res->start; key <= res->end; key += SECTION_SIZE)
+	for (key = align_start; key <= align_end; key += SECTION_SIZE)
@@ -226,12 +230,11 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 		percpu_ref_put(pgmap->ref);
 	}
 
-	pgmap_radix_release(res);
-
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(resource_size(res), SECTION_SIZE);
 	arch_remove_memory(align_start, align_size);
+	pgmap_radix_release(res);
 	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
 			"%s: failed to free all reserved pages\n", __func__);
 }
@@ -267,7 +270,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 {
 	int is_ram = region_intersects(res->start, resource_size(res),
 			"System RAM");
-	resource_size_t key, align_start, align_size;
+	resource_size_t key, align_start, align_size, align_end;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
 	unsigned long pfn;
@@ -309,7 +312,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
-	for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+	align_start = res->start & ~(SECTION_SIZE - 1);
+	align_size = ALIGN(resource_size(res), SECTION_SIZE);
+	align_end = align_start + align_size - 1;
+	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
 		struct dev_pagemap *dup;
 
 		rcu_read_lock();
@@ -336,8 +342,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (nid < 0)
 		nid = numa_mem_id();
 
-	align_start = res->start & ~(SECTION_SIZE - 1);
-	align_size = ALIGN(resource_size(res), SECTION_SIZE);
 	error = arch_add_memory(nid, align_start, align_size, true);
 	if (error)
 		goto err_add_memory;