Commit 5fbc4616 authored by Chris Metcalf, committed by Linus Torvalds

mm: make lru_add_drain_all() selective

Make lru_add_drain_all() only selectively interrupt the CPUs that have
per-CPU LRU pagevec pages that can be drained.

This is important in nohz mode, where a call such as mlockall() would
otherwise interrupt every core unnecessarily.

It matters especially for workloads where nohz cores handle 10 Gb traffic
in userspace.  Those CPUs never enter the kernel, so they never place pages
into LRU pagevecs, and they really, really don't want to be interrupted,
or they drop packets on the floor.
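
For context, a minimal userspace sketch (illustrative only, not part of this
commit): mlockall() is the kind of call that ends up in lru_add_drain_all(),
and with this change an isolated nohz core whose pagevecs are empty is no
longer handed drain work when another core makes such a call.

/* Illustrative userspace example -- not part of this commit. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	/*
	 * mlock/mlockall must first drain the per-CPU LRU pagevecs so the
	 * pages being locked actually sit on the LRU lists.  Before this
	 * patch that meant scheduling drain work on every online CPU;
	 * afterwards, only CPUs whose pagevecs are non-empty are touched.
	 */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return EXIT_FAILURE;
	}

	printf("address space locked\n");
	return EXIT_SUCCESS;
}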
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9cb2dc1c
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -280,7 +280,7 @@ extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
-extern int lru_add_drain_all(void);
+extern void lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void swap_setup(void);
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -432,6 +432,11 @@ static void activate_page_drain(int cpu)
 		pagevec_lru_move_fn(pvec, __activate_page, NULL);
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+}
+
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -449,6 +454,11 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+	return false;
+}
+
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -701,12 +711,36 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 	lru_add_drain();
 }
 
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+void lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu);
+	static DEFINE_MUTEX(lock);
+	static struct cpumask has_work;
+	int cpu;
+
+	mutex_lock(&lock);
+	get_online_cpus();
+	cpumask_clear(&has_work);
+
+	for_each_online_cpu(cpu) {
+		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+		    need_activate_page_drain(cpu)) {
+			INIT_WORK(work, lru_add_drain_per_cpu);
+			schedule_work_on(cpu, work);
+			cpumask_set_cpu(cpu, &has_work);
+		}
+	}
+
+	for_each_cpu(cpu, &has_work)
+		flush_work(&per_cpu(lru_add_drain_work, cpu));
+
+	put_online_cpus();
+	mutex_unlock(&lock);
 }
 
 /*
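
Because the prototype changes from int to void (the include/linux/swap.h hunk
above), any call site that checked the return value would need a trivial
update.  A hypothetical before/after sketch of such a caller, not taken from
this commit:

#include <linux/swap.h>
#include <linux/printk.h>

/* Hypothetical caller, for illustration only -- not part of this commit. */

/* Before this patch: the return value came from schedule_on_each_cpu(). */
static void example_drain_old(void)
{
	if (lru_add_drain_all())
		pr_warn("lru_add_drain_all() failed to run on all CPUs\n");
}

/* After this patch: the drain is unconditional and there is nothing to check. */
static void example_drain_new(void)
{
	lru_add_drain_all();
}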