Commit aaa994b3 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] page migration: handle freeing of pages in migrate_pages()

Do not leave pages on the lists passed to migrate_pages().  It seems that we
will not need any postprocessing of pages.  This will simplify the handling of
pages by the callers of migrate_pages().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e24f0b8f
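
As a reading aid (not part of the commit), the sketch below shows what the new calling convention means for a caller of migrate_pages(). The functions caller_before() and caller_after() are hypothetical, and how pagelist/newlist get filled is left out; the point is only that after this patch a caller no longer collects moved/failed lists or puts leftover pages back on the LRU, since migrate_pages() frees migrated pages and returns the rest to the LRU itself.

/* Illustrative sketch only -- hypothetical callers, not code from this patch. */

/* Before this patch: the caller received moved/failed lists back and had
 * to put those pages onto the LRU (or otherwise dispose of them) itself.
 */
static int caller_before(struct list_head *pagelist, struct list_head *newlist)
{
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err;

	err = migrate_pages(pagelist, newlist, &moved, &failed);
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);
	return err;
}

/* After this patch: migrate_pages() does not leave pages on the lists --
 * migrated pages are freed, pages that could not be migrated go back to
 * the LRU -- so the caller only checks the number of pages not migrated.
 */
static int caller_after(struct list_head *pagelist, struct list_head *newlist)
{
	int nr_not_migrated = migrate_pages(pagelist, newlist);

	return nr_not_migrated ? -EIO : 0;
}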
@@ -8,8 +8,7 @@ extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
-		struct list_head *moved, struct list_head *failed);
+extern int migrate_pages(struct list_head *l, struct list_head *t);
 extern int migrate_pages_to(struct list_head *pagelist,
 			struct vm_area_struct *vma, int dest);
 extern int fail_migrate_page(struct address_space *,
@@ -22,8 +21,8 @@ extern int migrate_prep(void);
 static inline int isolate_lru_page(struct page *p, struct list_head *list)
 					{ return -ENOSYS; }
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
-	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+static inline int migrate_pages(struct list_head *l, struct list_head *t)
+	{ return -ENOSYS; }
 static inline int migrate_pages_to(struct list_head *pagelist,
 			struct vm_area_struct *vma, int dest) { return 0; }
...
@@ -603,11 +603,8 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
-	if (!list_empty(&pagelist)) {
-		err = migrate_pages_to(&pagelist, NULL, dest);
-		if (!list_empty(&pagelist))
-			putback_lru_pages(&pagelist);
-	}
+	if (!list_empty(&pagelist))
+		err = migrate_pages_to(&pagelist, NULL, dest);
 	return err;
 }
@@ -773,9 +770,6 @@ long do_mbind(unsigned long start, unsigned long len,
 			err = -EIO;
 	}
-	if (!list_empty(&pagelist))
-		putback_lru_pages(&pagelist);
 	up_write(&mm->mmap_sem);
 	mpol_free(new);
 	return err;
...
@@ -624,6 +624,15 @@ static int unmap_and_move(struct page *newpage, struct page *page, int force)
 	unlock_page(page);
 ret:
 	if (rc != -EAGAIN) {
+		/*
+		 * A page that has been migrated has all references
+		 * removed and will be freed. A page that has not been
+		 * migrated will have kept its references and be
+		 * restored.
+		 */
+		list_del(&page->lru);
+		move_to_lru(page);
 		list_del(&newpage->lru);
 		move_to_lru(newpage);
 	}
@@ -640,12 +649,12 @@ static int unmap_and_move(struct page *newpage, struct page *page, int force)
  *
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
- * or no retryable pages exist anymore.
+ * or no retryable pages exist anymore. All pages will be
+ * returned to the LRU or freed.
  *
- * Return: Number of pages not migrated when "to" ran empty.
+ * Return: Number of pages not migrated.
  */
-int migrate_pages(struct list_head *from, struct list_head *to,
-		struct list_head *moved, struct list_head *failed)
+int migrate_pages(struct list_head *from, struct list_head *to)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -675,11 +684,9 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 			retry++;
 			break;
 		case 0:
-			list_move(&page->lru, moved);
 			break;
 		default:
 			/* Permanent failure */
-			list_move(&page->lru, failed);
 			nr_failed++;
 			break;
 		}
@@ -689,6 +696,7 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
+	putback_lru_pages(from);
 	return nr_failed + retry;
 }
@@ -702,11 +710,10 @@ int migrate_pages_to(struct list_head *pagelist,
 			struct vm_area_struct *vma, int dest)
 {
 	LIST_HEAD(newlist);
-	LIST_HEAD(moved);
-	LIST_HEAD(failed);
 	int err = 0;
 	unsigned long offset = 0;
 	int nr_pages;
+	int nr_failed = 0;
 	struct page *page;
 	struct list_head *p;
@@ -740,26 +747,17 @@ int migrate_pages_to(struct list_head *pagelist,
 		if (nr_pages > MIGRATE_CHUNK_SIZE)
 			break;
 	}
-	err = migrate_pages(pagelist, &newlist, &moved, &failed);
-	putback_lru_pages(&moved);	/* Call release pages instead ?? */
-	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
-		goto redo;
-out:
-	/* Return leftover allocated pages */
-	while (!list_empty(&newlist)) {
-		page = list_entry(newlist.next, struct page, lru);
-		list_del(&page->lru);
-		__free_page(page);
-	}
-	list_splice(&failed, pagelist);
-	if (err < 0)
-		return err;
+	err = migrate_pages(pagelist, &newlist);
+	if (err >= 0) {
+		nr_failed += err;
+		if (list_empty(&newlist) && !list_empty(pagelist))
+			goto redo;
+	}
+out:
 	/* Calculate number of leftover pages */
-	nr_pages = 0;
 	list_for_each(p, pagelist)
-		nr_pages++;
-	return nr_pages;
+		nr_failed++;
+	return nr_failed;
 }