Commit edcb4639 authored by Tejun Heo

percpu, module: implement reserved allocation and use it for module percpu variables

Impact: add reserved allocation functionality and use it for module
	percpu variables

This patch implements reserved allocation from the first chunk.  When
setting up the first chunk, the arch can ask to set aside a certain
number of bytes right after the core static area, which is then
available only through a separate reserved allocator.  This will be
used primarily for module static percpu variables on architectures
with a limited relocation range, to ensure that module percpu symbols
are inside the relocatable range.
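
For illustration, an arch with a limited relocation range could pass a
non-zero reserved size when setting up the first chunk.  The following
is a minimal sketch, not part of this patch; MODULE_PERCPU_RESERVE,
my_get_page, my_unit_size, my_dyn_size and my_base are hypothetical
names:

	/* Hypothetical arch setup code, for illustration only. */
	#define MODULE_PERCPU_RESERVE	(8 << 10)	/* illustrative 8KB */

	/*
	 * The third argument is the new reserved_size parameter; the
	 * x86 call sites below pass 0, i.e. no reserved area.
	 */
	ret = pcpu_setup_first_chunk(my_get_page, static_size,
				     MODULE_PERCPU_RESERVE,
				     my_unit_size, my_dyn_size,
				     my_base, NULL);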

If a reserved area is requested, the first chunk becomes reserved and
isn't available for regular allocation.  If the first chunk also
includes a piggy-backed dynamic allocation area, a separate chunk
mapping the same region is created to serve dynamic allocation.  The
first is called the static first chunk and the second the dynamic
first chunk.  Although they share the page map, their different area
map initializations guarantee that they serve disjoint areas according
to their purposes.
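
The area map trick can be sketched as follows (illustrative names
only, not the actual mm/percpu.c code; by the allocator's convention a
negative map entry marks an in-use area and a positive entry a free
one):

	/* Sketch only: set up the two first chunks' area maps. */
	static void sketch_init_first_chunks(int *schunk_map, int *dchunk_map,
					     int static_size,
					     int reserved_size, int dyn_size)
	{
		/*
		 * Static first chunk: the static area is in use, the
		 * reserved area is free for the reserved allocator.
		 */
		schunk_map[0] = -static_size;
		schunk_map[1] = reserved_size;

		/*
		 * Dynamic first chunk: static + reserved areas are
		 * marked in use, only the dynamic tail is free.
		 */
		dchunk_map[0] = -(static_size + reserved_size);
		dchunk_map[1] = dyn_size;
	}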

If the arch doesn't set up a reserved area, reserved allocation is
handled like any other allocation.
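
The allocator-side dispatch this implies can be sketched like this
(hypothetical helpers; the real implementation is in the collapsed
part of the diff):

	/* Sketch only: route reserved requests to the reserved chunk. */
	static void *sketch_pcpu_alloc(size_t size, size_t align, bool reserved)
	{
		if (reserved && pcpu_reserved_chunk)
			/* serve the request from the reserved area */
			return sketch_alloc_from(pcpu_reserved_chunk,
						 size, align);

		/* no reserved area set up: regular allocation */
		return sketch_alloc_regular(size, align);
	}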
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 3e24aa58
arch/x86/kernel/setup_percpu.c

@@ -217,7 +217,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
 		"%zu bytes\n", vm.addr, static_size);
 
-	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
+	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 0, PMD_SIZE,
 				     pcpur_size - static_size, vm.addr, NULL);
 	goto out_free_ar;
@@ -297,7 +297,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
 		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
 
-	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+	return pcpu_setup_first_chunk(pcpue_get_page, static_size, 0,
 				      pcpue_unit_size, dyn_size,
 				      pcpue_ptr, NULL);
 }
@@ -356,8 +356,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
 		pcpu4k_nr_static_pages, static_size);
 
-	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, -1, -1, NULL,
-				     pcpu4k_populate_pte);
+	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, -1, -1,
+				     NULL, pcpu4k_populate_pte);
 	goto out_free_ar;
 
 enomem:
include/linux/percpu.h

@@ -117,7 +117,7 @@ typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
 typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
 
 extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
-					size_t static_size,
+					size_t static_size, size_t reserved_size,
 					ssize_t unit_size, ssize_t dyn_size,
 					void *base_addr,
 					pcpu_populate_pte_fn_t populate_pte_fn);
@@ -129,6 +129,8 @@ extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
  */
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
 #else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
 
 struct percpu_data {
kernel/module.c

@@ -381,7 +381,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
 		align = PAGE_SIZE;
 	}
 
-	ptr = __alloc_percpu(size, align);
+	ptr = __alloc_reserved_percpu(size, align);
 	if (!ptr)
 		printk(KERN_WARNING
 		       "Could not allocate %lu bytes percpu data\n", size);
[Remainder of the diff collapsed and not shown.]