Commit bf6bddf1 authored by Rafael Aquini, committed by Linus Torvalds

mm: introduce compaction and migration for ballooned pages

Memory fragmentation introduced by ballooning can significantly reduce the
number of 2MB contiguous memory blocks available within a guest, imposing
performance penalties associated with the reduced number of transparent huge
pages the guest workload can use.

This patch introduces the helper functions as well as the necessary changes
to teach the compaction and migration code how to cope with pages which are
part of a guest memory balloon, in order to make them movable by memory
compaction procedures (a sketch of the balloon-page test these helpers rely
on follows the sign-off block below).

Signed-off-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 18468d93
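
For orientation before the diff: the hunks below lean on a balloon-page test
provided by the parent interface commit (18468d93). The sketch that follows is
a reconstruction of the expected shape of that test, not part of this patch;
mapping_balloon() and the exact guard conditions are assumptions here, and only
balloon_page_movable() itself is named by the diff.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch only: how a balloon page is expected to be recognized. */
static inline bool __is_movable_balloon_page(struct page *page)
{
        /*
         * The balloon driver assigns its pages a dedicated address_space;
         * a flag on that mapping (assumed accessor: mapping_balloon())
         * identifies them to compaction/migration.
         */
        struct address_space *mapping = page->mapping;

        return mapping && mapping_balloon(mapping);
}

static inline bool balloon_page_movable(struct page *page)
{
        /*
         * Only dereference page->mapping once it is clear the page is not
         * using that field for slab, swap-cache or anon/rmap bookkeeping.
         */
        if (!PageSlab(page) && !PageSwapCache(page) &&
            !PageAnon(page) && !page_mapped(page))
                return __is_movable_balloon_page(page);

        return false;
}
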
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -14,6 +14,7 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
+#include <linux/balloon_compaction.h>
 #include "internal.h"
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -565,9 +566,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			goto next_pageblock;
 		}
 
-		/* Check may be lockless but that's ok as we recheck later */
-		if (!PageLRU(page))
+		/*
+		 * Check may be lockless but that's ok as we recheck later.
+		 * It's possible to migrate LRU pages and balloon pages;
+		 * skip any other type of page.
+		 */
+		if (!PageLRU(page)) {
+			if (unlikely(balloon_page_movable(page))) {
+				if (locked && balloon_page_isolate(page)) {
+					/* Successfully isolated */
+					cc->finished_update_migrate = true;
+					list_add(&page->lru, migratelist);
+					cc->nr_migratepages++;
+					nr_isolated++;
+					goto check_compact_cluster;
+				}
+			}
 			continue;
+		}
 
 		/*
 		 * PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +637,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		cc->nr_migratepages++;
 		nr_isolated++;
 
+check_compact_cluster:
 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
 			++low_pfn;
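
The isolation taken in the compaction hunks above is paired with the putback
path added to mm/migrate.c below: balloon_page_isolate() grabs a reference and
rechecks the balloon test under the page lock, and balloon_page_putback()
undoes the isolation if migration never happens. A rough, hedged sketch of
that pairing, reconstructed from the companion interface patch; the
__isolate_balloon_page()/__putback_balloon_page() helpers and the
page_count() == 2 convention are assumptions, not taken from this diff.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Assumed internal helpers that unlink/relink the page on the balloon list. */
static void __isolate_balloon_page(struct page *page);
static void __putback_balloon_page(struct page *page);

/* Sketch only: isolate a balloon page for migration. */
bool balloon_page_isolate(struct page *page)
{
        /* Skip pages already being freed or put back by the driver. */
        if (likely(get_page_unless_zero(page))) {
                if (likely(trylock_page(page))) {
                        /*
                         * Recheck under the page lock: a plain balloon page
                         * is expected to hold exactly two references here
                         * (one from the balloon, one taken above), so an
                         * already-isolated page is skipped by this test.
                         */
                        if (__is_movable_balloon_page(page) &&
                            page_count(page) == 2) {
                                __isolate_balloon_page(page);
                                unlock_page(page);
                                return true;
                        }
                        unlock_page(page);
                }
                put_page(page);
        }
        return false;
}

/* Sketch only: give an isolated balloon page back to its balloon list. */
void balloon_page_putback(struct page *page)
{
        lock_page(page);
        if (__is_movable_balloon_page(page)) {
                __putback_balloon_page(page);
                put_page(page);         /* drop the isolation reference */
        }
        unlock_page(page);
}
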
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,6 +35,7 @@
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
+#include <linux/balloon_compaction.h>
 
 #include <asm/tlbflush.h>
 
@@ -79,6 +80,9 @@ void putback_lru_pages(struct list_head *l)
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		putback_lru_page(page);
+		if (unlikely(balloon_page_movable(page)))
+			balloon_page_putback(page);
+		else
+			putback_lru_page(page);
 	}
 }
@@ -768,6 +772,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	}
 
+	if (unlikely(balloon_page_movable(page))) {
+		/*
+		 * A ballooned page does not need any special attention from
+		 * physical to virtual reverse mapping procedures.
+		 * Skip any attempt to unmap PTEs or to remap swap cache,
+		 * in order to avoid burning cycles at rmap level, and perform
+		 * the page migration right away (protected by page lock).
+		 */
+		rc = balloon_page_migrate(newpage, page, mode);
+		goto uncharge;
+	}
+
 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -804,7 +820,9 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		put_anon_vma(anon_vma);
 
 uncharge:
-	mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
+	mem_cgroup_end_migration(mem, page, newpage,
+				 (rc == MIGRATEPAGE_SUCCESS ||
+				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
 unlock:
 	unlock_page(page);
 out:
@@ -836,6 +854,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		goto out;
 
 	rc = __unmap_and_move(page, newpage, force, offlining, mode);
+
+	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+		/*
+		 * A ballooned page has been migrated already.
+		 * Now it's time to wrap up counters, hand the
+		 * page back to Buddy and return.
+		 */
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				    page_is_file_cache(page));
+		balloon_page_free(page);
+		return MIGRATEPAGE_SUCCESS;
+	}
 out:
 	if (rc != -EAGAIN) {
 		/*
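
Finally, the balloon_page_migrate() call used in the __unmap_and_move() hunk
above is expected to do little more than hand both locked pages to the balloon
mapping's migratepage callback, whose driver-side implementation copies the
balloon state to newpage and returns MIGRATEPAGE_BALLOON_SUCCESS. A simplified,
hedged sketch of that dispatch follows; the real helper also sanity-checks the
page and manages newpage's lock.

#include <linux/fs.h>
#include <linux/migrate.h>

/* Sketch only: dispatch balloon page migration to the driver callback. */
int balloon_page_migrate(struct page *newpage, struct page *page,
			 enum migrate_mode mode)
{
        struct address_space *mapping = page->mapping;
        int rc = -EAGAIN;

        /* Both pages arrive locked from the migration core. */
        if (mapping && mapping->a_ops->migratepage)
                rc = mapping->a_ops->migratepage(mapping, newpage,
                                                 page, mode);
        return rc;
}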