Commit 1df631ae authored by Minchan Kim, committed by Linus Torvalds

mm: make rmap_walk() return void

There is no user of the return value from rmap_walk() and friends so
this patch makes them void-returning functions.

Link: http://lkml.kernel.org/r/1489555493-14659-9-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 666e5a40
...@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page, ...@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page,
struct page *ksm_might_need_to_copy(struct page *page, struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address); struct vm_area_struct *vma, unsigned long address);
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage); void ksm_migrate_page(struct page *newpage, struct page *oldpage);
#else /* !CONFIG_KSM */ #else /* !CONFIG_KSM */
...@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page, ...@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page,
return 0; return 0;
} }
static inline int rmap_walk_ksm(struct page *page, static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc) struct rmap_walk_control *rwc)
{ {
return 0;
} }
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
......
...@@ -264,8 +264,8 @@ struct rmap_walk_control { ...@@ -264,8 +264,8 @@ struct rmap_walk_control {
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
}; };
int rmap_walk(struct page *page, struct rmap_walk_control *rwc); void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */ #else /* !CONFIG_MMU */
......
...@@ -1933,11 +1933,10 @@ struct page *ksm_might_need_to_copy(struct page *page, ...@@ -1933,11 +1933,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page; return new_page;
} }
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{ {
struct stable_node *stable_node; struct stable_node *stable_node;
struct rmap_item *rmap_item; struct rmap_item *rmap_item;
int ret = SWAP_AGAIN;
int search_new_forks = 0; int search_new_forks = 0;
VM_BUG_ON_PAGE(!PageKsm(page), page); VM_BUG_ON_PAGE(!PageKsm(page), page);
...@@ -1950,7 +1949,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) ...@@ -1950,7 +1949,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
stable_node = page_stable_node(page); stable_node = page_stable_node(page);
if (!stable_node) if (!stable_node)
return ret; return;
again: again:
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma *anon_vma = rmap_item->anon_vma;
...@@ -1978,23 +1977,20 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) ...@@ -1978,23 +1977,20 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue; continue;
ret = rwc->rmap_one(page, vma, if (SWAP_AGAIN != rwc->rmap_one(page, vma,
rmap_item->address, rwc->arg); rmap_item->address, rwc->arg)) {
if (ret != SWAP_AGAIN) {
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
goto out; return;
} }
if (rwc->done && rwc->done(page)) { if (rwc->done && rwc->done(page)) {
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
goto out; return;
} }
} }
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
} }
if (!search_new_forks++) if (!search_new_forks++)
goto again; goto again;
out:
return ret;
} }
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
......
...@@ -1607,13 +1607,12 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, ...@@ -1607,13 +1607,12 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be * vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED. * LOCKED.
*/ */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
bool locked) bool locked)
{ {
struct anon_vma *anon_vma; struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end; pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc; struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
if (locked) { if (locked) {
anon_vma = page_anon_vma(page); anon_vma = page_anon_vma(page);
...@@ -1623,7 +1622,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, ...@@ -1623,7 +1622,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
anon_vma = rmap_walk_anon_lock(page, rwc); anon_vma = rmap_walk_anon_lock(page, rwc);
} }
if (!anon_vma) if (!anon_vma)
return ret; return;
pgoff_start = page_to_pgoff(page); pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
...@@ -1637,8 +1636,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, ...@@ -1637,8 +1636,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue; continue;
ret = rwc->rmap_one(page, vma, address, rwc->arg); if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
if (ret != SWAP_AGAIN)
break; break;
if (rwc->done && rwc->done(page)) if (rwc->done && rwc->done(page))
break; break;
...@@ -1646,7 +1644,6 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, ...@@ -1646,7 +1644,6 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
if (!locked) if (!locked)
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
return ret;
} }
/* /*
...@@ -1662,13 +1659,12 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, ...@@ -1662,13 +1659,12 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be * vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED. * LOCKED.
*/ */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
bool locked) bool locked)
{ {
struct address_space *mapping = page_mapping(page); struct address_space *mapping = page_mapping(page);
pgoff_t pgoff_start, pgoff_end; pgoff_t pgoff_start, pgoff_end;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
/* /*
* The page lock not only makes sure that page->mapping cannot * The page lock not only makes sure that page->mapping cannot
...@@ -1679,7 +1675,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, ...@@ -1679,7 +1675,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!mapping) if (!mapping)
return ret; return;
pgoff_start = page_to_pgoff(page); pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
...@@ -1694,8 +1690,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, ...@@ -1694,8 +1690,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue; continue;
ret = rwc->rmap_one(page, vma, address, rwc->arg); if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
if (ret != SWAP_AGAIN)
goto done; goto done;
if (rwc->done && rwc->done(page)) if (rwc->done && rwc->done(page))
goto done; goto done;
...@@ -1704,28 +1699,27 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, ...@@ -1704,28 +1699,27 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
done: done:
if (!locked) if (!locked)
i_mmap_unlock_read(mapping); i_mmap_unlock_read(mapping);
return ret;
} }
int rmap_walk(struct page *page, struct rmap_walk_control *rwc) void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{ {
if (unlikely(PageKsm(page))) if (unlikely(PageKsm(page)))
return rmap_walk_ksm(page, rwc); rmap_walk_ksm(page, rwc);
else if (PageAnon(page)) else if (PageAnon(page))
return rmap_walk_anon(page, rwc, false); rmap_walk_anon(page, rwc, false);
else else
return rmap_walk_file(page, rwc, false); rmap_walk_file(page, rwc, false);
} }
/* Like rmap_walk, but caller holds relevant rmap lock */ /* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{ {
/* no ksm support for now */ /* no ksm support for now */
VM_BUG_ON_PAGE(PageKsm(page), page); VM_BUG_ON_PAGE(PageKsm(page), page);
if (PageAnon(page)) if (PageAnon(page))
return rmap_walk_anon(page, rwc, true); rmap_walk_anon(page, rwc, true);
else else
return rmap_walk_file(page, rwc, true); rmap_walk_file(page, rwc, true);
} }
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment