Commit 3f906ba2 authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Linus Torvalds

mm/memory-hotplug: switch locking to a percpu rwsem

Andrey reported a potential deadlock with the memory hotplug lock and
the cpu hotplug lock.

The reason is that memory hotplug takes the memory hotplug lock and then
calls stop_machine() which calls get_online_cpus().  That's the reverse
lock order to get_online_cpus(); get_online_mems(); in mm/slab_common.c

The problem has been there forever.  The reason why this was never
reported is that the cpu hotplug locking had this homebrewed recursive
reader-writer semaphore construct which, due to the recursion, evaded
full lockdep coverage.  The memory hotplug code copied that construct
verbatim and therefore has similar issues.

Three steps to fix this:

1) Convert the memory hotplug locking to a per-cpu rwsem so the
   potential issues get reported properly by lockdep.

2) Lock the online cpus in mem_hotplug_begin() before taking the memory
   hotplug rwsem and use stop_machine_cpuslocked() in the page_alloc
   code to avoid recursive locking.

3) The cpu hotplug locking in #2 causes a recursive locking of the cpu
   hotplug lock via __offline_pages() -> lru_add_drain_all(). Solve this
   by invoking lru_add_drain_all_cpuslocked() instead.

Link: http://lkml.kernel.org/r/20170704093421.506836322@linutronix.de
Reported-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a47fed5b
@@ -52,32 +52,17 @@ static void generic_online_page(struct page *page);
 static online_page_callback_t online_page_callback = generic_online_page;
 static DEFINE_MUTEX(online_page_callback_lock);
 
-/* The same as the cpu_hotplug lock, but for memory hotplug. */
-static struct {
-	struct task_struct *active_writer;
-	struct mutex lock; /* Synchronizes accesses to refcount, */
-	/*
-	 * Also blocks the new readers during
-	 * an ongoing mem hotplug operation.
-	 */
-	int refcount;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} mem_hotplug = {
-	.active_writer = NULL,
-	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
-	.refcount = 0,
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = {.name = "mem_hotplug.lock" },
-#endif
-};
-
-/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
-#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
-#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
-#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
+DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
+
+void get_online_mems(void)
+{
+	percpu_down_read(&mem_hotplug_lock);
+}
+
+void put_online_mems(void)
+{
+	percpu_up_read(&mem_hotplug_lock);
+}
 
 bool movable_node_enabled = false;
@@ -99,60 +84,16 @@ static int __init setup_memhp_default_state(char *str)
 }
 __setup("memhp_default_state=", setup_memhp_default_state);
 
-void get_online_mems(void)
-{
-	might_sleep();
-	if (mem_hotplug.active_writer == current)
-		return;
-	memhp_lock_acquire_read();
-	mutex_lock(&mem_hotplug.lock);
-	mem_hotplug.refcount++;
-	mutex_unlock(&mem_hotplug.lock);
-}
-
-void put_online_mems(void)
-{
-	if (mem_hotplug.active_writer == current)
-		return;
-	mutex_lock(&mem_hotplug.lock);
-
-	if (WARN_ON(!mem_hotplug.refcount))
-		mem_hotplug.refcount++; /* try to fix things up */
-
-	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
-		wake_up_process(mem_hotplug.active_writer);
-	mutex_unlock(&mem_hotplug.lock);
-	memhp_lock_release();
-}
-
-/* Serializes write accesses to mem_hotplug.active_writer. */
-static DEFINE_MUTEX(memory_add_remove_lock);
-
 void mem_hotplug_begin(void)
 {
-	mutex_lock(&memory_add_remove_lock);
-
-	mem_hotplug.active_writer = current;
-
-	memhp_lock_acquire();
-	for (;;) {
-		mutex_lock(&mem_hotplug.lock);
-		if (likely(!mem_hotplug.refcount))
-			break;
-		__set_current_state(TASK_UNINTERRUPTIBLE);
-		mutex_unlock(&mem_hotplug.lock);
-		schedule();
-	}
+	cpus_read_lock();
+	percpu_down_write(&mem_hotplug_lock);
 }
 
 void mem_hotplug_done(void)
 {
-	mem_hotplug.active_writer = NULL;
-	mutex_unlock(&mem_hotplug.lock);
-	memhp_lock_release();
-	mutex_unlock(&memory_add_remove_lock);
+	percpu_up_write(&mem_hotplug_lock);
+	cpus_read_unlock();
 }
 
 /* add this memory to iomem resource */
@@ -1725,7 +1666,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 		goto failed_removal;
 	ret = 0;
 	if (drain) {
-		lru_add_drain_all();
+		lru_add_drain_all_cpuslocked();
 		cond_resched();
 		drain_all_pages(zone);
 	}
@@ -1746,7 +1687,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 		}
 	}
 
 	/* drain all zone's lru pagevec, this is asynchronous... */
-	lru_add_drain_all();
+	lru_add_drain_all_cpuslocked();
 	yield();
 	/* drain pcp pages, this is synchronous. */
 	drain_all_pages(zone);
@@ -5278,7 +5278,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 #endif
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
-		stop_machine(__build_all_zonelists, pgdat, NULL);
+		stop_machine_cpuslocked(__build_all_zonelists, pgdat, NULL);
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment