Commit 08fc4580 authored by Tejun Heo

percpu: build first chunk allocators selectively

There's no need to build unused first chunk allocators in.  Define
CONFIG_NEED_PER_CPU_*_FIRST_CHUNK and let archs enable them
selectively.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 00ae4064
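
As background for the change below: a minimal sketch of how an architecture that selects CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK could call the embed allocator from its setup_per_cpu_areas(). Only the pcpu_embed_first_chunk() signature comes from this patch; the reserve sizes and offset bookkeeping mirror the generic setup path and are illustrative, not part of this commit.

void __init setup_per_cpu_areas(void)
{
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/* embed static + reserved + dynamic areas into one bootmem block */
	unit_size = pcpu_embed_first_chunk(__per_cpu_end - __per_cpu_start,
					   PERCPU_MODULE_RESERVE,
					   PERCPU_DYNAMIC_RESERVE);
	if (unit_size < 0)
		panic("percpu: failed to initialize first chunk");

	/* record each CPU's offset from the linker-provided static area */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
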
@@ -150,6 +150,16 @@ config ARCH_HAS_CACHE_LINE_SIZE
config HAVE_SETUP_PER_CPU_AREA
	def_bool y

config NEED_PER_CPU_EMBED_FIRST_CHUNK
	def_bool y

config NEED_PER_CPU_PAGE_FIRST_CHUNK
	def_bool y

config NEED_PER_CPU_LPAGE_FIRST_CHUNK
	def_bool y
	depends on NEED_MULTIPLE_NODES

config HAVE_CPUMASK_OF_CPU_MAP
	def_bool X86_64_SMP
......
@@ -70,17 +70,21 @@ extern size_t __init pcpu_setup_first_chunk(
				ssize_t dyn_size, size_t unit_size,
				void *base_addr, const int *unit_map);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern ssize_t __init pcpu_page_first_chunk(
				size_t static_size, size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

#ifdef CONFIG_NEED_MULTIPLE_NODES
#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
extern int __init pcpu_lpage_build_unit_map(
				size_t static_size, size_t reserved_size,
				ssize_t *dyn_sizep, size_t *unit_sizep,
@@ -98,27 +102,6 @@ extern ssize_t __init pcpu_lpage_first_chunk(
extern void *pcpu_lpage_remapped(void *kaddr);
#else
static inline int pcpu_lpage_build_unit_map(
				size_t static_size, size_t reserved_size,
				ssize_t *dyn_sizep, size_t *unit_sizep,
				size_t lpage_size, int *unit_map,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	return -EINVAL;
}

static inline ssize_t __init pcpu_lpage_first_chunk(
				size_t static_size, size_t reserved_size,
				size_t dyn_size, size_t unit_size,
				size_t lpage_size, const int *unit_map,
				int nr_units,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_map_fn_t map_fn)
{
	return -EINVAL;
}

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
......
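
To illustrate the page-allocator interface guarded above: a rough sketch of the callbacks a caller selecting CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK would supply. Only the pcpu_page_first_chunk() prototype is from the patch; the helper names and bodies below (bootmem-backed alloc/free, an empty PTE-populate hook) are illustrative stand-ins and assume the pcpu_fc_*_fn_t callback signatures of this kernel series.

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
{
	/* illustrative: a real arch allocates memory local to @cpu's node */
	return __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	/* illustrative: arch would pre-populate the page table for @addr */
}

static ssize_t __init example_page_first_chunk(size_t static_size)
{
	return pcpu_page_first_chunk(static_size, PERCPU_MODULE_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpu_fc_populate_pte);
}
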
@@ -1414,8 +1414,9 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
	return pcpu_unit_size;
}

static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
				 ssize_t *dyn_sizep)
static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;
@@ -1427,6 +1428,8 @@ static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
	return size_sum;
}

#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @static_size: the size of static percpu area in bytes
@@ -1495,7 +1498,10 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
	return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
				      unit_size, base, NULL);
}
#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @static_size: the size of static percpu area in bytes
@@ -1598,12 +1604,9 @@ ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
	free_bootmem(__pa(pages), pages_size);
	return ret;
}
#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */

/*
 * Large page remapping first chunk setup helper
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
/**
 * pcpu_lpage_build_unit_map - build unit_map for large page remapping
 * @static_size: the size of static percpu area in bytes
@@ -1982,7 +1985,7 @@ void *pcpu_lpage_remapped(void *kaddr)
	return NULL;
}
#endif
#endif /* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */
/*
* Generic percpu area setup.
......