Commit a47fed5b authored by Thomas Gleixner, committed by Linus Torvalds

mm: swap: provide lru_add_drain_all_cpuslocked()

The rework of the cpu hotplug locking unearthed potential deadlocks with
the memory hotplug locking code.

The solution for these is to rework the memory hotplug locking code as
well and take the cpu hotplug lock before the memory hotplug lock in
mem_hotplug_begin(), but this will cause a recursive locking of the cpu
hotplug lock when the memory hotplug code calls lru_add_drain_all().

Split out the inner workings of lru_add_drain_all() into
lru_add_drain_all_cpuslocked() so this function can be invoked from the
memory hotplug code with the cpu hotplug lock held.
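As an illustration of the intended usage, here is a minimal sketch of a
memory-hotplug-side caller, assuming the cpu hotplug lock is already held.
The function name example_mem_hotplug_path() is hypothetical; the actual
mem_hotplug_begin() rework is a separate patch in this series:

#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
#include <linux/swap.h>		/* lru_add_drain_all_cpuslocked() */

/*
 * Hypothetical caller: the cpu hotplug lock is already held here, so
 * the _cpuslocked variant must be used to avoid taking it recursively.
 */
static void example_mem_hotplug_path(void)
{
	get_online_cpus();	/* cpu hotplug lock, taken first */

	/* ... memory hotplug lock would be taken here ... */

	/* Drain the pagevecs without re-taking the cpu hotplug lock: */
	lru_add_drain_all_cpuslocked();

	put_online_cpus();
}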

Link: http://lkml.kernel.org/r/20170704093421.419329357@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 24c79d8e
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -277,6 +277,7 @@ extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_all(void);
+extern void lru_add_drain_all_cpuslocked(void);
 extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -688,7 +688,7 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-void lru_add_drain_all(void)
+void lru_add_drain_all_cpuslocked(void)
 {
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
@@ -702,7 +702,6 @@ void lru_add_drain_all(void)
 		return;
 
 	mutex_lock(&lock);
-	get_online_cpus();
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
@@ -722,10 +721,16 @@ void lru_add_drain_all(void)
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
 
-	put_online_cpus();
 	mutex_unlock(&lock);
 }
 
+void lru_add_drain_all(void)
+{
+	get_online_cpus();
+	lru_add_drain_all_cpuslocked();
+	put_online_cpus();
+}
+
 /**
  * release_pages - batched put_page()
  * @pages: array of pages to release