Commit 9a773769 authored by Tejun Heo

percpu: drop @static_size from first chunk allocators

The first chunk allocators assume that the percpu areas have been linked
using one of the PERCPU_*() macros and depend on the __per_cpu_load
symbol those macros define, so there is little point in passing in the
static area size explicitly when it can easily be calculated from
__per_cpu_start and __per_cpu_end.  Drop @static_size from all percpu
first chunk allocators and helpers.

Signed-off-by: Tejun Heo <tj@kernel.org>
parent f58dc01b
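For reference, the calculation the allocators now do internally instead of taking @static_size as a parameter is just the distance between the two linker symbols emitted by the PERCPU_*() macros. The standalone sketch below illustrates the idea; the pcpu_static_size() helper is hypothetical (the kernel open-codes the subtraction inside each allocator), and the symbol declarations are included only to make the fragment self-contained.

#include <stddef.h>

/* Emitted by the PERCPU_*() linker script macros; declared here only to
 * make this illustrative fragment self-contained. */
extern char __per_cpu_start[], __per_cpu_end[];

/* Hypothetical helper: the static percpu area is the span between the two
 * linker symbols, so callers never need to pass its size around. */
static inline size_t pcpu_static_size(void)
{
        return (size_t)(__per_cpu_end - __per_cpu_start);
}

This mirrors the "const size_t static_size = __per_cpu_end - __per_cpu_start;" lines the patch adds inside pcpu_embed_first_chunk(), pcpu_page_first_chunk(), pcpu_lpage_build_unit_map() and pcpu_lpage_first_chunk().
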
@@ -157,7 +157,7 @@ static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
         return REMOTE_DISTANCE;
 }
 
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_lpage(bool chosen)
 {
         size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
         size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
@@ -184,8 +184,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
                 return -ENOMEM;
         }
 
-        ret = pcpu_lpage_build_unit_map(static_size,
-                                        PERCPU_FIRST_CHUNK_RESERVE,
+        ret = pcpu_lpage_build_unit_map(PERCPU_FIRST_CHUNK_RESERVE,
                                         &dyn_size, &unit_size, PMD_SIZE,
                                         unit_map, pcpu_lpage_cpu_distance);
         if (ret < 0) {
@@ -208,9 +207,8 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
                 }
         }
 
-        ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
-                                     dyn_size, unit_size, PMD_SIZE,
-                                     unit_map, nr_units,
+        ret = pcpu_lpage_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
+                                     unit_size, PMD_SIZE, unit_map, nr_units,
                                      pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
 out_free:
         if (ret < 0)
@@ -218,7 +216,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
         return ret;
 }
 #else
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_lpage(bool chosen)
 {
         return -EINVAL;
 }
@@ -232,7 +230,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
+static ssize_t __init setup_pcpu_embed(bool chosen)
 {
         size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
@@ -244,7 +242,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
         if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
                 return -EINVAL;
 
-        return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+        return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                       reserve - PERCPU_FIRST_CHUNK_RESERVE);
 }
@@ -260,9 +258,9 @@ static void __init pcpup_populate_pte(unsigned long addr)
         populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_page(size_t static_size)
+static ssize_t __init setup_pcpu_page(void)
 {
-        return pcpu_page_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+        return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                      pcpu_fc_alloc, pcpu_fc_free,
                                      pcpup_populate_pte);
 }
@@ -282,7 +280,6 @@ static inline void setup_percpu_segment(int cpu)
 
 void __init setup_per_cpu_areas(void)
 {
-        size_t static_size = __per_cpu_end - __per_cpu_start;
         unsigned int cpu;
         unsigned long delta;
         size_t pcpu_unit_size;
@@ -300,9 +297,9 @@ void __init setup_per_cpu_areas(void)
         if (pcpu_chosen_fc != PCPU_FC_AUTO) {
                 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                         if (pcpu_chosen_fc == PCPU_FC_LPAGE)
-                                ret = setup_pcpu_lpage(static_size, true);
+                                ret = setup_pcpu_lpage(true);
                         else
-                                ret = setup_pcpu_embed(static_size, true);
+                                ret = setup_pcpu_embed(true);
                         if (ret < 0)
                                 pr_warning("PERCPU: %s allocator failed (%zd), "
@@ -310,15 +307,14 @@ void __init setup_per_cpu_areas(void)
                                            pcpu_fc_names[pcpu_chosen_fc], ret);
                 }
         } else {
-                ret = setup_pcpu_lpage(static_size, false);
+                ret = setup_pcpu_lpage(false);
                 if (ret < 0)
-                        ret = setup_pcpu_embed(static_size, false);
+                        ret = setup_pcpu_embed(false);
         }
         if (ret < 0)
-                ret = setup_pcpu_page(static_size);
+                ret = setup_pcpu_page();
         if (ret < 0)
-                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
-                      static_size, ret);
+                panic("cannot initialize percpu area (err=%zd)", ret);
 
         pcpu_unit_size = ret;
...
@@ -84,13 +84,12 @@ extern size_t __init pcpu_setup_first_chunk(
 
 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
 extern ssize_t __init pcpu_embed_first_chunk(
-                                size_t static_size, size_t reserved_size,
-                                ssize_t dyn_size);
+                                size_t reserved_size, ssize_t dyn_size);
 #endif
 
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 extern ssize_t __init pcpu_page_first_chunk(
-                                size_t static_size, size_t reserved_size,
+                                size_t reserved_size,
                                 pcpu_fc_alloc_fn_t alloc_fn,
                                 pcpu_fc_free_fn_t free_fn,
                                 pcpu_fc_populate_pte_fn_t populate_pte_fn);
@@ -98,16 +97,15 @@ extern ssize_t __init pcpu_page_first_chunk(
 
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
 extern int __init pcpu_lpage_build_unit_map(
-                                size_t static_size, size_t reserved_size,
-                                ssize_t *dyn_sizep, size_t *unit_sizep,
-                                size_t lpage_size, int *unit_map,
+                                size_t reserved_size, ssize_t *dyn_sizep,
+                                size_t *unit_sizep, size_t lpage_size,
+                                int *unit_map,
                                 pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
 
 extern ssize_t __init pcpu_lpage_first_chunk(
-                                size_t static_size, size_t reserved_size,
-                                size_t dyn_size, size_t unit_size,
-                                size_t lpage_size, const int *unit_map,
-                                int nr_units,
+                                size_t reserved_size, size_t dyn_size,
+                                size_t unit_size, size_t lpage_size,
+                                const int *unit_map, int nr_units,
                                 pcpu_fc_alloc_fn_t alloc_fn,
                                 pcpu_fc_free_fn_t free_fn,
                                 pcpu_fc_map_fn_t map_fn);
...
@@ -1464,7 +1464,6 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
         !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 /**
  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
  *
@@ -1489,9 +1488,9 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-                                      ssize_t dyn_size)
+ssize_t __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
 {
+        const size_t static_size = __per_cpu_end - __per_cpu_start;
         size_t size_sum, unit_size, chunk_size;
         void *base;
         unsigned int cpu;
@@ -1536,7 +1535,6 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 /**
  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
  * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
@@ -1552,12 +1550,13 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
+ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
                                      pcpu_fc_alloc_fn_t alloc_fn,
                                      pcpu_fc_free_fn_t free_fn,
                                      pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
         static struct vm_struct vm;
+        const size_t static_size = __per_cpu_end - __per_cpu_start;
         char psize_str[16];
         int unit_pages;
         size_t pages_size;
@@ -1641,7 +1640,6 @@ ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
 /**
  * pcpu_lpage_build_unit_map - build unit_map for large page remapping
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
  * @unit_sizep: out parameter for unit size
@@ -1661,13 +1659,14 @@ ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
  * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
  * returns the number of units to be allocated.  -errno on failure.
  */
-int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
-                                     ssize_t *dyn_sizep, size_t *unit_sizep,
-                                     size_t lpage_size, int *unit_map,
+int __init pcpu_lpage_build_unit_map(size_t reserved_size, ssize_t *dyn_sizep,
+                                     size_t *unit_sizep, size_t lpage_size,
+                                     int *unit_map,
                                      pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
 {
         static int group_map[NR_CPUS] __initdata;
         static int group_cnt[NR_CPUS] __initdata;
+        const size_t static_size = __per_cpu_end - __per_cpu_start;
         int group_cnt_max = 0;
         size_t size_sum, min_unit_size, alloc_size;
         int upa, max_upa, uninitialized_var(best_upa);  /* units_per_alloc */
@@ -1819,7 +1818,6 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
 
 /**
  * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes
  * @unit_size: unit size in bytes
@@ -1850,15 +1848,15 @@ static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
  * The determined pcpu_unit_size which can be used to initialize
  * percpu access on success, -errno on failure.
  */
-ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
-                                      size_t dyn_size, size_t unit_size,
-                                      size_t lpage_size, const int *unit_map,
-                                      int nr_units,
+ssize_t __init pcpu_lpage_first_chunk(size_t reserved_size, size_t dyn_size,
+                                      size_t unit_size, size_t lpage_size,
+                                      const int *unit_map, int nr_units,
                                       pcpu_fc_alloc_fn_t alloc_fn,
                                       pcpu_fc_free_fn_t free_fn,
                                       pcpu_fc_map_fn_t map_fn)
 {
         static struct vm_struct vm;
+        const size_t static_size = __per_cpu_end - __per_cpu_start;
         size_t chunk_size = unit_size * nr_units;
         size_t map_size;
         unsigned int cpu;
@@ -2037,7 +2035,6 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-        size_t static_size = __per_cpu_end - __per_cpu_start;
         ssize_t unit_size;
         unsigned long delta;
         unsigned int cpu;
@@ -2046,7 +2043,7 @@ void __init setup_per_cpu_areas(void)
          * Always reserve area for module percpu variables.  That's
          * what the legacy allocator did.
          */
-        unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+        unit_size = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                            PERCPU_DYNAMIC_RESERVE);
         if (unit_size < 0)
                 panic("Failed to initialized percpu areas.");
...