Commit e13861d8 authored by Minchan Kim, committed by Linus Torvalds

mm: remove return value of putback_lru_pages()

putback_lru_page() can never fail, so counting "the number of pages put
back" serves no purpose.

In addition, callers of this function don't use the return value.

Let's remove unnecessary code.
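
For context, a minimal sketch of the typical calling pattern (the helper name
and the choice of migrate_pages() arguments are illustrative, not taken from
this commit; only the pattern of ignoring the putback count is the point):

/*
 * Illustrative sketch only: the helper and its callers are hypothetical,
 * but the pattern mirrors how in-tree users (e.g. memory hotplug,
 * compaction) treat putback_lru_pages().
 */
static int migrate_isolated_pages_sketch(struct list_head *pagelist,
					 new_page_t get_new_page)
{
	int err;

	/* Try to migrate every isolated page on the list. */
	err = migrate_pages(pagelist, get_new_page, 0, 1);
	if (err) {
		/*
		 * Some pages could not be migrated; hand the leftovers
		 * back to the LRU.  No caller inspects how many pages
		 * were put back, which is why the return value of
		 * putback_lru_pages() can become void.
		 */
		putback_lru_pages(pagelist);
	}
	return err;
}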
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4b50dc26
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -9,7 +9,7 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
-extern int putback_lru_pages(struct list_head *l);
+extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
@@ -25,7 +25,7 @@ extern int migrate_vmas(struct mm_struct *mm,
 #else
 #define PAGE_MIGRATION 0
 
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -58,23 +58,18 @@ int migrate_prep(void)
 /*
  * Add isolated pages on the list back to the LRU under page lock
  * to avoid leaking evictable pages back onto unevictable list.
- *
- * returns the number of pages put back.
  */
-int putback_lru_pages(struct list_head *l)
+void putback_lru_pages(struct list_head *l)
 {
 	struct page *page;
 	struct page *page2;
-	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		putback_lru_page(page);
-		count++;
 	}
-	return count;
 }
 
 /*