Commit 74060e4d authored by Naoya Horiguchi, committed by Linus Torvalds

mm: mbind: add hugepage migration code to mbind()

Extend do_mbind() to handle vmas with VM_HUGETLB set.  We will be able to
migrate hugepages with mbind(2) after applying the enablement patch which
comes later in this series.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e632a938
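
[Editor's illustration, not part of the commit: once the enablement patch lands, a userspace caller could exercise this path roughly as sketched below. The node number, mapping size, and assumed 2MB hugepage size are hypothetical; build with -lnuma on a system with hugepages reserved.]

/* Illustrative sketch only: migrate an anonymous hugetlb mapping to
 * NUMA node 1 with mbind(2). Assumes a 2MB default hugepage size and
 * at least two NUMA nodes. */
#define _GNU_SOURCE
#include <numaif.h>		/* mbind(), MPOL_* */
#include <sys/mman.h>
#include <stdio.h>

#define LENGTH	(8UL << 20)	/* four 2MB hugepages (assumed size) */

int main(void)
{
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	*(char *)addr = 1;	/* fault a page in so there is something to migrate */

	unsigned long nodemask = 1UL << 1;	/* node 1 */
	/* MPOL_MF_MOVE asks the kernel to migrate pages already
	 * allocated elsewhere; with this series that now covers
	 * hugetlb pages too. */
	if (mbind(addr, LENGTH, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_MOVE))
		perror("mbind");

	munmap(addr, LENGTH);
	return 0;
}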
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -265,6 +265,8 @@ struct huge_bootmem_page {
 };
 
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -378,6 +380,7 @@ static inline pgoff_t basepage_index(struct page *page)
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1207,6 +1207,20 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
+/*
+ * alloc_huge_page()'s wrapper which simply returns the page if allocation
+ * succeeds, otherwise NULL. This function is called from new_vma_page(),
+ * where no ERR_VALUE is expected to be returned.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve)
+{
+	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+	if (IS_ERR(page))
+		page = NULL;
+	return page;
+}
+
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
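
[Editor's side note on why the wrapper exists: alloc_huge_page() signals failure with the kernel's ERR_PTR convention, an errno encoded in the pointer itself, while new_vma_page() callers expect plain NULL on failure. A minimal, userspace-compilable sketch of that convention follows; the macros are simplified stand-ins for the kernel's <linux/err.h>, and fake_alloc()/fake_alloc_noerr() are hypothetical names.]

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR machinery,
 * shown only to illustrate the NULL-returning wrapper pattern. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* A fake allocator that fails the ERR_PTR way, like alloc_huge_page(). */
static void *fake_alloc(int fail)
{
	static int storage;
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &storage;
}

/* The wrapper pattern from this patch: callers that only understand
 * NULL-on-failure get NULL instead of an encoded errno pointer. */
static void *fake_alloc_noerr(int fail)
{
	void *p = fake_alloc(fail);
	return IS_ERR(p) ? NULL : p;
}

int main(void)
{
	printf("success: %p\n", fake_alloc_noerr(0));
	printf("failure: %p\n", fake_alloc_noerr(1));	/* prints (nil) */
	return 0;
}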
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1192,6 +1192,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
 		vma = vma->vm_next;
 	}
 
+	if (PageHuge(page))
+		return alloc_huge_page_noerr(vma, address, 1);
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy
 	 */
@@ -1302,7 +1304,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 					(unsigned long)vma,
 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 		if (nr_failed)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	if (nr_failed && (flags & MPOL_MF_STRICT))