Commit e2266705 authored by Dennis Zhou (Facebook), committed by Tejun Heo

percpu: introduce start_offset to pcpu_chunk

The reserved chunk arithmetic uses a global variable,
pcpu_reserved_chunk_limit, that is set in the first chunk init code to
hide a portion of the area map. The upcoming bitmap allocator will
eventually move the base_addr up and require both the reserved chunk
and the static chunk to maintain this offset. pcpu_reserved_chunk_limit
is therefore removed and start_offset is added.
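
As an illustration only (not part of the patch), the sketch below mirrors
the arithmetic the hunks apply to schunk and dchunk; the struct and helper
names here are hypothetical:

    /* Illustration only: hypothetical helpers mirroring the hunks below. */
    struct first_chunk_sizes {
            int static_size;        /* ai->static_size */
            int reserved_size;      /* ai->reserved_size, 0 if absent */
    };

    /* static/reserved chunk: the static region is hidden behind start_offset */
    static int schunk_start_offset(const struct first_chunk_sizes *s)
    {
            return s->static_size;
    }

    /* dynamic chunk: both the static and reserved regions are hidden */
    static int dchunk_start_offset(const struct first_chunk_sizes *s)
    {
            return s->static_size + s->reserved_size;
    }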

The first chunk that is circulated, pcpu_first_chunk, serves the
dynamic region, i.e. the region following the reserved region. The
reserved chunk address check will temporarily use the first chunk to
identify its address range; a following patch will increase the
base_addr and remove this. If there is no reserved chunk, the check
covers the static region and returns false, because addresses in that
region should never be passed into the allocator.
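
A minimal sketch of that interim check, assuming only what the
pcpu_addr_in_reserved_chunk hunk below shows (the helper name here is
hypothetical; kernel-style void pointer arithmetic and bool are assumed):

    /*
     * Illustration only: the reserved-region check is now bounded by the
     * first chunk's start_offset rather than pcpu_reserved_chunk_limit.
     * With no reserved chunk, [first_start, first_start + start_offset)
     * is just the static region, which valid allocator addresses never
     * fall into, so the check returns false.
     */
    static bool addr_in_first_chunk_prefix(void *addr, void *first_start,
                                           int start_offset)
    {
            return addr >= first_start &&
                   addr < first_start + start_offset;
    }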

Lastly, when linking in the first chunk, make sure the correct free
region is counted when computing the number of empty populated pages.
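
A small sketch of that index selection, assuming only the semantics visible
in the final hunk below (the helper name is hypothetical):

    /*
     * Illustration only: with a nonzero start_offset the area map begins
     * with the hidden prefix, so the first free region is map entry 1;
     * without an offset it is entry 0.
     */
    static int first_free_region_index(int start_offset)
    {
            return start_offset ? 1 : 0;
    }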
Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent fb29a2cc
@@ -28,6 +28,9 @@ struct pcpu_chunk {
                                           contain reservation for static chunk.
                                           Dynamic chunk will contain reservation
                                           for static and reserved chunks. */
+        int                     start_offset;   /* the overlap with the previous
+                                                   region to have a page aligned
+                                                   base_addr */
         int                     nr_populated;   /* # of populated pages */
         unsigned long           populated[];    /* populated bitmap */
 };

@@ -145,13 +145,10 @@ struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

 /*
  * Optional reserved chunk.  This chunk reserves part of the first
- * chunk and serves it for reserved allocations.  The amount of
- * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
- * area doesn't exist, the following variables contain NULL and 0
- * respectively.
+ * chunk and serves it for reserved allocations.  When the reserved
+ * region doesn't exist, the following variable is NULL.
  */
 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
-static int pcpu_reserved_chunk_limit __ro_after_init;

 DEFINE_SPINLOCK(pcpu_lock);     /* all internal data structures */
 static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop, map ext */

@@ -196,7 +193,7 @@ static bool pcpu_addr_in_reserved_chunk(void *addr)
         void *first_start = pcpu_first_chunk->base_addr;

         return addr >= first_start &&
-               addr < first_start + pcpu_reserved_chunk_limit;
+               addr < first_start + pcpu_first_chunk->start_offset;
 }

 static int __pcpu_size_to_slot(int size)

@@ -1687,6 +1684,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         INIT_LIST_HEAD(&schunk->list);
         INIT_LIST_HEAD(&schunk->map_extend_list);
         schunk->base_addr = base_addr;
+        schunk->start_offset = ai->static_size;
         schunk->map = smap;
         schunk->map_alloc = ARRAY_SIZE(smap);
         schunk->immutable = true;

@@ -1696,7 +1694,6 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         if (ai->reserved_size) {
                 schunk->free_size = ai->reserved_size;
                 pcpu_reserved_chunk = schunk;
-                pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
         } else {
                 schunk->free_size = dyn_size;
                 dyn_size = 0;                   /* dynamic area covered */

@@ -1704,7 +1701,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         schunk->contig_hint = schunk->free_size;

         schunk->map[0] = 1;
-        schunk->map[1] = ai->static_size;
+        schunk->map[1] = schunk->start_offset;
         schunk->map[2] = (ai->static_size + schunk->free_size) | 1;
         schunk->map_used = 2;
         schunk->has_reserved = true;

@@ -1715,6 +1712,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                 INIT_LIST_HEAD(&dchunk->list);
                 INIT_LIST_HEAD(&dchunk->map_extend_list);
                 dchunk->base_addr = base_addr;
+                dchunk->start_offset = ai->static_size + ai->reserved_size;
                 dchunk->map = dmap;
                 dchunk->map_alloc = ARRAY_SIZE(dmap);
                 dchunk->immutable = true;

@@ -1723,16 +1721,17 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                 dchunk->contig_hint = dchunk->free_size = dyn_size;

                 dchunk->map[0] = 1;
-                dchunk->map[1] = pcpu_reserved_chunk_limit;
-                dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
+                dchunk->map[1] = dchunk->start_offset;
+                dchunk->map[2] = (dchunk->start_offset + dchunk->free_size) | 1;
                 dchunk->map_used = 2;
                 dchunk->has_reserved = true;
         }

         /* link the first chunk in */
         pcpu_first_chunk = dchunk ?: schunk;
+        i = (pcpu_first_chunk->start_offset) ? 1 : 0;
         pcpu_nr_empty_pop_pages +=
-                pcpu_count_occupied_pages(pcpu_first_chunk, 1);
+                pcpu_count_occupied_pages(pcpu_first_chunk, i);
         pcpu_chunk_relocate(pcpu_first_chunk, -1);

         pcpu_stats_chunk_alloc();