Commit 91eea67c authored by Mark Nelson, committed by Benjamin Herrenschmidt

powerpc/mm: Track backing pages allocated by vmemmap_populate()

We need to keep track of the backing pages that get allocated by
vmemmap_populate() so that when we use kdump, the dump-capture kernel knows
where these pages are.

We use a simple linked list of structures that contain the physical address
of the backing page and corresponding virtual address to track the backing
pages.
To save space, we just use a pointer to the next struct vmemmap_backing; we can do this
because we never remove nodes. We call the pointer "list" to be compatible with changes
made to the crash utility.
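For illustration only, here is a minimal standalone C sketch (userspace, not kernel code) of how a
consumer such as the dump-capture side could walk this prepend-only list to find the backing page
for a given vmemmap address. The track()/lookup() helpers and the addresses are hypothetical; only
struct vmemmap_backing mirrors what this patch adds.

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the structure added by this patch. */
    struct vmemmap_backing {
    	struct vmemmap_backing *list;	/* next entry; entries are never removed */
    	unsigned long phys;		/* physical address of the backing page */
    	unsigned long virt_addr;	/* vmemmap virtual address it backs */
    };

    static struct vmemmap_backing *vmemmap_list;

    /* Prepend a new entry, the way vmemmap_list_populate() does below. */
    static void track(unsigned long phys, unsigned long virt)
    {
    	struct vmemmap_backing *vb = malloc(sizeof(*vb));

    	if (!vb)
    		return;
    	vb->phys = phys;
    	vb->virt_addr = virt;
    	vb->list = vmemmap_list;
    	vmemmap_list = vb;
    }

    /* Hypothetical lookup: find the entry whose backing page covers 'virt'. */
    static struct vmemmap_backing *lookup(unsigned long virt, unsigned long page_size)
    {
    	struct vmemmap_backing *vb;

    	for (vb = vmemmap_list; vb; vb = vb->list)
    		if (virt >= vb->virt_addr && virt - vb->virt_addr < page_size)
    			return vb;
    	return NULL;
    }

    int main(void)
    {
    	const unsigned long page_size = 1UL << 16;	/* assume 64K pages */

    	track(0x01000000UL, 0xf000000000000000UL);	/* made-up addresses */
    	track(0x02000000UL, 0xf000000000010000UL);

    	struct vmemmap_backing *vb = lookup(0xf000000000010008UL, page_size);

    	if (vb)
    		printf("virt 0x%lx is backed by phys 0x%lx\n",
    		       vb->virt_addr, vb->phys);
    	return 0;
    }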

vmemmap_populate() is called either at boot-time or on a memory hotplug
operation. We don't have to worry about the boot-time calls because they
will be inherently single-threaded, and for a memory hotplug operation
vmemmap_populate() is called through:
sparse_add_one_section()
            |
            V
kmalloc_section_memmap()
            |
            V
sparse_mem_map_populate()
            |
            V
vmemmap_populate()
and in sparse_add_one_section() we're protected by pgdat_resize_lock().
So, we don't need a spinlock to protect the vmemmap_list.

We allocate space for the vmemmap_backing structs by allocating whole pages
in vmemmap_list_alloc() and then handing out chunks of this to
vmemmap_list_populate().

This means that we waste at most just under one page, but this keeps the code simple.
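To make that waste bound concrete, the sketch below is a standalone userspace model of this chunk
allocator, with malloc() standing in for vmemmap_alloc_block() and 64K pages assumed: on a 64-bit
build the 24-byte struct packs 2730 times into a 64K page, leaving 16 bytes of tail slack, and only
the unused chunks of the most recently allocated page are ever wasted.

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 65536UL		/* assume 64K pages */

    struct vmemmap_backing {
    	struct vmemmap_backing *list;
    	unsigned long phys;
    	unsigned long virt_addr;
    };

    /*
     * Userspace model of vmemmap_list_alloc(): grab a whole page when the
     * current one is exhausted, then hand out struct-sized chunks from it.
     */
    static struct vmemmap_backing *list_alloc(void)
    {
    	static struct vmemmap_backing *next;
    	static int num_left;

    	if (!next || !num_left) {
    		next = malloc(PAGE_SIZE);	/* stand-in for vmemmap_alloc_block() */
    		if (!next)
    			return NULL;
    		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
    	}

    	num_left--;
    	return next++;
    }

    int main(void)
    {
    	unsigned long per_page = PAGE_SIZE / sizeof(struct vmemmap_backing);

    	printf("%zu-byte entries, %lu per page, %lu bytes of tail slack\n",
    	       sizeof(struct vmemmap_backing), per_page,
    	       PAGE_SIZE - per_page * sizeof(struct vmemmap_backing));

    	return list_alloc() ? 0 : 1;
    }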
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 13bb5339
@@ -11,6 +11,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+
 /*
  * Functions that deal with pagetables that could be at any level of
  * the table need to be passed an "index_size" so they know how to
...
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+{
+	static struct vmemmap_backing *next;
+	static int num_left;
+
+	/* allocate a page when required and hand out chunks */
+	if (!next || !num_left) {
+		next = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (unlikely(!next)) {
+			WARN_ON(1);
+			return NULL;
+		}
+		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+	}
+
+	num_left--;
+
+	return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+					    unsigned long start,
+					    int node)
+{
+	struct vmemmap_backing *vmem_back;
+
+	vmem_back = vmemmap_list_alloc(node);
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return;
+	}
+
+	vmem_back->phys = phys;
+	vmem_back->virt_addr = start;
+	vmem_back->list = vmemmap_list;
+
+	vmemmap_list = vmem_back;
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
 		if (!p)
 			return -ENOMEM;
 
+		vmemmap_list_populate(__pa(p), start, node);
+
 		pr_debug("      * %016lx..%016lx allocated at %p\n",
 			 start, start + page_size, p);
...