Commit 666feb21 authored by Michal Hocko, committed by Linus Torvalds

mm, migrate: remove reason argument from new_page_t

No allocation callback is using this argument anymore.  new_page_node
used this parameter to convey the node_id or, on failure, the migration
error up to the move_pages code (do_move_page_to_node_array).  The error
status never made it into the final status field, and we now have a
better way to communicate the node id to the status field.  All other
allocation callbacks simply ignored the argument, so we can finally drop
it.

[mhocko@suse.com: fix migration callback]
  Link: http://lkml.kernel.org/r/20180105085259.GH2801@dhcp22.suse.cz
[akpm@linux-foundation.org: fix alloc_misplaced_dst_page()]
[mhocko@kernel.org: fix build]
  Link: http://lkml.kernel.org/r/20180103091134.GB11319@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20180103082555.14592-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a49bd4d7
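
For reference, a minimal sketch of an allocation callback under the new
two-argument new_page_t signature.  The callback name and the choice of
target node are illustrative only, not part of this commit;
alloc_pages_node(), migrate_pages(), MIGRATE_SYNC and MR_SYSCALL are the
existing kernel APIs of this era:

	/* Hypothetical callback: allocate the destination page on the
	 * node passed in 'private'.  Note the signature no longer takes
	 * the 'int **' out-parameter; returning NULL simply makes the
	 * caller fail the migration with -ENOMEM. */
	static struct page *example_new_page(struct page *page,
					     unsigned long private)
	{
		int nid = (int) private;

		return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
	}

A caller hands the callback and its private payload to migrate_pages():

	err = migrate_pages(&pagelist, example_new_page, NULL,
			    (unsigned long) nid, MIGRATE_SYNC, MR_SYSCALL);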
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -75,8 +75,7 @@ EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 /*
  * Taken from alloc_migrate_target with changes to remove CMA allocations
  */
-struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
-			int **resultp)
+struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
 {
 	gfp_t gfp_mask = GFP_USER;
 	struct page *new_page;
...
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,8 +7,7 @@
 #include <linux/migrate_mode.h>
 #include <linux/hugetlb.h>
 
-typedef struct page *new_page_t(struct page *page, unsigned long private,
-				int **reason);
+typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
 /*
...
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -63,7 +63,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 			bool skip_hwpoisoned_pages);
 
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
-				int **resultp);
+struct page *alloc_migrate_target(struct page *page, unsigned long private);
 
 #endif
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1166,8 +1166,7 @@ static void isolate_freepages(struct compact_control *cc)
  * from the isolated freelists in the block we are migrating to.
  */
 static struct page *compaction_alloc(struct page *migratepage,
-					unsigned long data,
-					int **result)
+					unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
...
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -538,5 +538,5 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-extern struct page *alloc_new_node_page(struct page *page, unsigned long node, int **x);
+extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
 #endif	/* __MM_INTERNAL_H */
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1487,7 +1487,7 @@ int unpoison_memory(unsigned long pfn)
 }
 EXPORT_SYMBOL(unpoison_memory);
 
-static struct page *new_page(struct page *p, unsigned long private, int **x)
+static struct page *new_page(struct page *p, unsigned long private)
 {
 	int nid = page_to_nid(p);
...
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1329,8 +1329,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 	return 0;
 }
 
-static struct page *new_node_page(struct page *page, unsigned long private,
-		int **result)
+static struct page *new_node_page(struct page *page, unsigned long private)
 {
 	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
...
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -943,7 +943,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 }
 
 /* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, unsigned long node, int **x)
+struct page *alloc_new_node_page(struct page *page, unsigned long node)
 {
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
@@ -1108,7 +1108,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
 	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
@@ -1153,7 +1153,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_page(struct page *page, unsigned long start, int **x)
+static struct page *new_page(struct page *page, unsigned long start)
 {
 	return NULL;
 }
...
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1137,10 +1137,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 				   enum migrate_reason reason)
 {
 	int rc = MIGRATEPAGE_SUCCESS;
-	int *result = NULL;
 	struct page *newpage;
 
-	newpage = get_new_page(page, private, &result);
+	newpage = get_new_page(page, private);
 	if (!newpage)
 		return -ENOMEM;
 
@@ -1231,12 +1230,6 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 			put_page(newpage);
 	}
 
-	if (result) {
-		if (rc)
-			*result = rc;
-		else
-			*result = page_to_nid(newpage);
-	}
 	return rc;
 }
 
@@ -1264,7 +1257,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason)
 {
 	int rc = -EAGAIN;
-	int *result = NULL;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
 	struct anon_vma *anon_vma = NULL;
@@ -1281,7 +1273,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOSYS;
 	}
 
-	new_hpage = get_new_page(hpage, private, &result);
+	new_hpage = get_new_page(hpage, private);
 	if (!new_hpage)
 		return -ENOMEM;
 
@@ -1345,12 +1337,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	else
 		putback_active_hugepage(new_hpage);
 
-	if (result) {
-		if (rc)
-			*result = rc;
-		else
-			*result = page_to_nid(new_hpage);
-	}
 	return rc;
 }
 
@@ -1828,8 +1814,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
 }
 
 static struct page *alloc_misplaced_dst_page(struct page *page,
-					   unsigned long data,
-					   int **result)
+					   unsigned long data)
 {
 	int nid = (int) data;
 	struct page *newpage;
...
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -309,8 +309,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	return pfn < end_pfn ? -EBUSY : 0;
 }
 
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
-				int **resultp)
+struct page *alloc_migrate_target(struct page *page, unsigned long private)
 {
 	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
 }