Commit db06d759 authored by Linus Torvalds

Merge branch 'for-4.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

Pull percpu fixes from Tejun Heo:
 "While adding GFP_ATOMIC support to the percpu allocator, the
  synchronization for the fast-path which doesn't require external
  allocations was separated into pcpu_lock.

  Unfortunately, it incorrectly decoupled async paths and percpu
  chunks could get destroyed while still being operated on.  This
  contains two patches to fix the bug"

* 'for-4.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: fix synchronization between synchronous map extension and chunk destruction
  percpu: fix synchronization between chunk->map_extend_work and chunk destruction
parents 35398ee3 6710e594
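
Before the diff, a rough picture of the pattern the fix adopts may help. The buggy scheme gave each chunk its own work item (chunk->map_extend_work), so a pending or running worker could outlive the chunk it pointed at. The fix instead queues needy chunks on one global list under pcpu_lock and lets the existing balance worker drain it; chunk destruction simply unlinks the chunk from that list first. What follows is a minimal userspace sketch of that pattern, not kernel code: the names (struct chunk, request_extend, balance_worker) are invented for illustration, and the final pthread_join stands in for the serialization the kernel actually gets from pcpu_alloc_mutex.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for struct pcpu_chunk; only the fields the pattern needs. */
    struct chunk {
            struct chunk *ext_next; /* link on extend_list */
            bool queued;            /* on extend_list? (kernel: !list_empty()) */
            int map_alloc;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays pcpu_lock */
    static struct chunk *extend_list;       /* plays pcpu_map_extend_chunks */

    /* Fast path: record that @c needs a bigger map.  Caller holds @lock. */
    static void request_extend(struct chunk *c)
    {
            if (!c->queued) {
                    c->ext_next = extend_list;
                    extend_list = c;
                    c->queued = true;
            }
    }

    /* Destruction: unlink first, so the worker can never see a dead chunk. */
    static void destroy_chunk(struct chunk *c)
    {
            pthread_mutex_lock(&lock);
            if (c->queued) {
                    struct chunk **pp = &extend_list;
                    while (*pp != c)
                            pp = &(*pp)->ext_next;
                    *pp = c->ext_next;
            }
            pthread_mutex_unlock(&lock);
            free(c);
    }

    /* Worker: pop one chunk at a time; do the slow extension outside @lock. */
    static void *balance_worker(void *arg)
    {
            struct chunk *c;

            (void)arg;
            do {
                    pthread_mutex_lock(&lock);
                    c = extend_list;
                    if (c) {
                            extend_list = c->ext_next;
                            c->queued = false;
                    }
                    pthread_mutex_unlock(&lock);

                    if (c)
                            c->map_alloc *= 2; /* stands in for pcpu_extend_area_map() */
            } while (c);
            return NULL;
    }

    int main(void)
    {
            struct chunk *c = calloc(1, sizeof(*c));
            pthread_t worker;

            c->map_alloc = 16;
            pthread_mutex_lock(&lock);
            request_extend(c);      /* kernel: done inside pcpu_need_to_extend() */
            pthread_mutex_unlock(&lock);

            pthread_create(&worker, NULL, balance_worker, NULL);
            pthread_join(worker, NULL); /* plays pcpu_alloc_mutex serialization */
            printf("map_alloc grew to %d\n", c->map_alloc);
            destroy_chunk(c);
            return 0;
    }

The key property, visible in the real diff below, is that a chunk can only be reached by the worker through the global list, and destruction removes it from that list under the same lock, so no stale per-chunk callback can fire after the chunk is gone.
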
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -112,7 +112,7 @@ struct pcpu_chunk {
 	int			map_used;	/* # of map entries used before the sentry */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
-	struct work_struct	map_extend_work;/* async ->map[] extension */
+	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
 
 	void			*data;		/* chunk data */
 	int			first_free;	/* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
  * The number of empty populated pages, protected by pcpu_lock.  The
  * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
 	int margin, new_alloc;
 
+	lockdep_assert_held(&pcpu_lock);
+
 	if (is_atomic) {
 		margin = 3;
 
 		if (chunk->map_alloc <
-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-		    pcpu_async_enabled)
-			schedule_work(&chunk->map_extend_work);
+		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+			if (list_empty(&chunk->map_extend_list)) {
+				list_add_tail(&chunk->map_extend_list,
+					      &pcpu_map_extend_chunks);
+				pcpu_schedule_balance_work();
+			}
+		}
 	} else {
 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
 	}
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 	unsigned long flags;
 
+	lockdep_assert_held(&pcpu_alloc_mutex);
+
 	new = pcpu_mem_zalloc(new_size);
 	if (!new)
 		return -ENOMEM;
@@ -467,20 +478,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-						map_extend_work);
-	int new_alloc;
-
-	spin_lock_irq(&pcpu_lock);
-	new_alloc = pcpu_need_to_extend(chunk, false);
-	spin_unlock_irq(&pcpu_lock);
-
-	if (new_alloc)
-		pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map_used = 1;
 
 	INIT_LIST_HEAD(&chunk->list);
-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&chunk->map_extend_list);
 	chunk->free_size = pcpu_unit_size;
 	chunk->contig_hint = pcpu_unit_size;
 
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		return NULL;
 	}
 
+	if (!is_atomic)
+		mutex_lock(&pcpu_alloc_mutex);
+
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	if (is_atomic)
 		goto fail;
 
-	mutex_lock(&pcpu_alloc_mutex);
-
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
 		chunk = pcpu_create_chunk();
 		if (!chunk) {
-			mutex_unlock(&pcpu_alloc_mutex);
 			err = "failed to allocate new chunk";
 			goto fail;
 		}
@@ -983,7 +980,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		spin_lock_irqsave(&pcpu_lock, flags);
 	}
 
-	mutex_unlock(&pcpu_alloc_mutex);
 	goto restart;
 
 area_found:
@@ -993,8 +989,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	if (!is_atomic) {
 		int page_start, page_end, rs, re;
 
-		mutex_lock(&pcpu_alloc_mutex);
-
 		page_start = PFN_DOWN(off);
 		page_end = PFN_UP(off + size);
 
@@ -1005,7 +999,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 			spin_lock_irqsave(&pcpu_lock, flags);
 			if (ret) {
-				mutex_unlock(&pcpu_alloc_mutex);
 				pcpu_free_area(chunk, off, &occ_pages);
 				err = "failed to populate";
 				goto fail_unlock;
@@ -1045,6 +1038,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		/* see the flag handling in pcpu_blance_workfn() */
 		pcpu_atomic_alloc_failed = true;
 		pcpu_schedule_balance_work();
+	} else {
+		mutex_unlock(&pcpu_alloc_mutex);
 	}
 	return NULL;
 }
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
+		list_del_init(&chunk->map_extend_list);
 		list_move(&chunk->list, &to_free);
 	}
 
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		pcpu_destroy_chunk(chunk);
 	}
 
+	/* service chunks which requested async area map extension */
+	do {
+		int new_alloc = 0;
+
+		spin_lock_irq(&pcpu_lock);
+
+		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+					struct pcpu_chunk, map_extend_list);
+		if (chunk) {
+			list_del_init(&chunk->map_extend_list);
+			new_alloc = pcpu_need_to_extend(chunk, false);
+		}
+
+		spin_unlock_irq(&pcpu_lock);
+
+		if (new_alloc)
+			pcpu_extend_area_map(chunk, new_alloc);
+	} while (chunk);
+
 	/*
 	 * Ensure there are certain number of free populated pages for
 	 * atomic allocs.  Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&schunk->map_extend_list);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	if (dyn_size) {
 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+		INIT_LIST_HEAD(&dchunk->map_extend_list);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);