Commit 9c620e2b authored by Hugh Dickins's avatar Hugh Dickins Committed by Linus Torvalds

mm: remove offlining arg to migrate_pages

No functional change, but the only purpose of the offlining argument to
migrate_pages() etc, was to ensure that __unmap_and_move() could migrate a
KSM page for memory hotremove (which took ksm_thread_mutex) but not for
other callers.  Now all cases are safe, remove the arg.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b79bc0a0
...@@ -40,11 +40,9 @@ extern void putback_movable_pages(struct list_head *l); ...@@ -40,11 +40,9 @@ extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *, extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode); struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x, extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode, int reason);
enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x, extern int migrate_huge_page(struct page *, new_page_t x,
unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode);
enum migrate_mode mode);
extern int fail_migrate_page(struct address_space *, extern int fail_migrate_page(struct address_space *,
struct page *, struct page *); struct page *, struct page *);
...@@ -62,11 +60,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, ...@@ -62,11 +60,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
static inline void putback_lru_pages(struct list_head *l) {} static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {} static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x, static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode, int reason)
enum migrate_mode mode, int reason) { return -ENOSYS; } { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x, static inline int migrate_huge_page(struct page *page, new_page_t x,
unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode)
enum migrate_mode mode) { return -ENOSYS; } { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; }
......
...@@ -980,7 +980,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) ...@@ -980,7 +980,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages; nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc, err = migrate_pages(&cc->migratepages, compaction_alloc,
(unsigned long)cc, false, (unsigned long)cc,
cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC, cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
MR_COMPACTION); MR_COMPACTION);
update_nr_listpages(cc); update_nr_listpages(cc);
......
...@@ -1465,7 +1465,7 @@ static int soft_offline_huge_page(struct page *page, int flags) ...@@ -1465,7 +1465,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
unlock_page(hpage); unlock_page(hpage);
/* Keep page count to indicate a given hugepage is isolated. */ /* Keep page count to indicate a given hugepage is isolated. */
ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false, ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC); MIGRATE_SYNC);
put_page(hpage); put_page(hpage);
if (ret) { if (ret) {
...@@ -1597,11 +1597,10 @@ static int __soft_offline_page(struct page *page, int flags) ...@@ -1597,11 +1597,10 @@ static int __soft_offline_page(struct page *page, int flags)
if (!ret) { if (!ret) {
LIST_HEAD(pagelist); LIST_HEAD(pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON + inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page)); page_is_file_cache(page));
list_add(&page->lru, &pagelist); list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
false, MIGRATE_SYNC, MIGRATE_SYNC, MR_MEMORY_FAILURE);
MR_MEMORY_FAILURE);
if (ret) { if (ret) {
putback_lru_pages(&pagelist); putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n", pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
......
...@@ -1286,8 +1286,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) ...@@ -1286,8 +1286,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* migrate_pages returns # of failed pages. * migrate_pages returns # of failed pages.
*/ */
ret = migrate_pages(&source, alloc_migrate_target, 0, ret = migrate_pages(&source, alloc_migrate_target, 0,
true, MIGRATE_SYNC, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
MR_MEMORY_HOTPLUG);
if (ret) if (ret)
putback_lru_pages(&source); putback_lru_pages(&source);
} }
......
...@@ -1014,8 +1014,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, ...@@ -1014,8 +1014,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
if (!list_empty(&pagelist)) { if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest, err = migrate_pages(&pagelist, new_node_page, dest,
false, MIGRATE_SYNC, MIGRATE_SYNC, MR_SYSCALL);
MR_SYSCALL);
if (err) if (err)
putback_lru_pages(&pagelist); putback_lru_pages(&pagelist);
} }
...@@ -1259,9 +1258,8 @@ static long do_mbind(unsigned long start, unsigned long len, ...@@ -1259,9 +1258,8 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist)) { if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY); WARN_ON_ONCE(flags & MPOL_MF_LAZY);
nr_failed = migrate_pages(&pagelist, new_vma_page, nr_failed = migrate_pages(&pagelist, new_vma_page,
(unsigned long)vma, (unsigned long)vma,
false, MIGRATE_SYNC, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
MR_MEMPOLICY_MBIND);
if (nr_failed) if (nr_failed)
putback_lru_pages(&pagelist); putback_lru_pages(&pagelist);
} }
......
...@@ -701,7 +701,7 @@ static int move_to_new_page(struct page *newpage, struct page *page, ...@@ -701,7 +701,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
} }
static int __unmap_and_move(struct page *page, struct page *newpage, static int __unmap_and_move(struct page *page, struct page *newpage,
int force, bool offlining, enum migrate_mode mode) int force, enum migrate_mode mode)
{ {
int rc = -EAGAIN; int rc = -EAGAIN;
int remap_swapcache = 1; int remap_swapcache = 1;
...@@ -847,8 +847,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, ...@@ -847,8 +847,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* to the newly allocated page in newpage. * to the newly allocated page in newpage.
*/ */
static int unmap_and_move(new_page_t get_new_page, unsigned long private, static int unmap_and_move(new_page_t get_new_page, unsigned long private,
struct page *page, int force, bool offlining, struct page *page, int force, enum migrate_mode mode)
enum migrate_mode mode)
{ {
int rc = 0; int rc = 0;
int *result = NULL; int *result = NULL;
...@@ -866,7 +865,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, ...@@ -866,7 +865,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (unlikely(split_huge_page(page))) if (unlikely(split_huge_page(page)))
goto out; goto out;
rc = __unmap_and_move(page, newpage, force, offlining, mode); rc = __unmap_and_move(page, newpage, force, mode);
if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
/* /*
...@@ -926,8 +925,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, ...@@ -926,8 +925,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
*/ */
static int unmap_and_move_huge_page(new_page_t get_new_page, static int unmap_and_move_huge_page(new_page_t get_new_page,
unsigned long private, struct page *hpage, unsigned long private, struct page *hpage,
int force, bool offlining, int force, enum migrate_mode mode)
enum migrate_mode mode)
{ {
int rc = 0; int rc = 0;
int *result = NULL; int *result = NULL;
...@@ -989,9 +987,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -989,9 +987,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* *
* Return: Number of pages not migrated or error code. * Return: Number of pages not migrated or error code.
*/ */
int migrate_pages(struct list_head *from, int migrate_pages(struct list_head *from, new_page_t get_new_page,
new_page_t get_new_page, unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode, int reason)
enum migrate_mode mode, int reason)
{ {
int retry = 1; int retry = 1;
int nr_failed = 0; int nr_failed = 0;
...@@ -1012,8 +1009,7 @@ int migrate_pages(struct list_head *from, ...@@ -1012,8 +1009,7 @@ int migrate_pages(struct list_head *from,
cond_resched(); cond_resched();
rc = unmap_and_move(get_new_page, private, rc = unmap_and_move(get_new_page, private,
page, pass > 2, offlining, page, pass > 2, mode);
mode);
switch(rc) { switch(rc) {
case -ENOMEM: case -ENOMEM:
...@@ -1046,15 +1042,13 @@ int migrate_pages(struct list_head *from, ...@@ -1046,15 +1042,13 @@ int migrate_pages(struct list_head *from,
} }
int migrate_huge_page(struct page *hpage, new_page_t get_new_page, int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
unsigned long private, bool offlining, unsigned long private, enum migrate_mode mode)
enum migrate_mode mode)
{ {
int pass, rc; int pass, rc;
for (pass = 0; pass < 10; pass++) { for (pass = 0; pass < 10; pass++) {
rc = unmap_and_move_huge_page(get_new_page, rc = unmap_and_move_huge_page(get_new_page, private,
private, hpage, pass > 2, offlining, hpage, pass > 2, mode);
mode);
switch (rc) { switch (rc) {
case -ENOMEM: case -ENOMEM:
goto out; goto out;
...@@ -1177,8 +1171,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm, ...@@ -1177,8 +1171,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
err = 0; err = 0;
if (!list_empty(&pagelist)) { if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node, err = migrate_pages(&pagelist, new_page_node,
(unsigned long)pm, 0, MIGRATE_SYNC, (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
MR_SYSCALL);
if (err) if (err)
putback_lru_pages(&pagelist); putback_lru_pages(&pagelist);
} }
...@@ -1613,10 +1606,8 @@ int migrate_misplaced_page(struct page *page, int node) ...@@ -1613,10 +1606,8 @@ int migrate_misplaced_page(struct page *page, int node)
goto out; goto out;
list_add(&page->lru, &migratepages); list_add(&page->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
alloc_misplaced_dst_page, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
node, false, MIGRATE_ASYNC,
MR_NUMA_MISPLACED);
if (nr_remaining) { if (nr_remaining) {
putback_lru_pages(&migratepages); putback_lru_pages(&migratepages);
isolated = 0; isolated = 0;
......
...@@ -6084,10 +6084,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, ...@@ -6084,10 +6084,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
&cc->migratepages); &cc->migratepages);
cc->nr_migratepages -= nr_reclaimed; cc->nr_migratepages -= nr_reclaimed;
ret = migrate_pages(&cc->migratepages, ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
alloc_migrate_target, 0, MIGRATE_SYNC, MR_CMA);
0, false, MIGRATE_SYNC,
MR_CMA);
} }
if (ret < 0) { if (ret < 0) {
putback_movable_pages(&cc->migratepages); putback_movable_pages(&cc->migratepages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment