Commit c177c81e authored by Naoya Horiguchi, committed by Linus Torvalds

hugetlb: restrict hugepage_migration_support() to x86_64

Currently hugepage migration is available for all archs which support
pmd-level hugepages, but testing has been done only for x86_64, and
there are bugs on other archs.  So, to avoid breaking such archs, this
patch limits the availability strictly to x86_64 until developers of
other archs get interested in enabling this feature.
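
The mechanism is a new Kconfig symbol: mm/Kconfig declares it, and only
x86 selects it.  Roughly (condensed from the two Kconfig hunks below):

    # mm/Kconfig: the symbol an arch must provide to opt in
    config ARCH_ENABLE_HUGEPAGE_MIGRATION
    	boolean

    # arch/x86/Kconfig: currently the only arch that enables it
    config ARCH_ENABLE_HUGEPAGE_MIGRATION
    	def_bool y
    	depends on X86_64 && HUGETLB_PAGE && MIGRATION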

Simply disabling hugepage migration on non-x86_64 archs is not enough to
fix the reported problem where sys_move_pages() hits the BUG_ON() in
follow_page(FOLL_GET), so let's fix this by making vma_migratable()
check whether hugepage migration is supported.
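
Put together, the compile-time gate and the new runtime check look
roughly like this (a condensed sketch of the include/linux/hugetlb.h
and include/linux/mempolicy.h hunks below; the trailing zone checks of
vma_migratable() are elided):

    /* include/linux/hugetlb.h: only pmd-sized hugepages migrate, and
     * only on archs that opted in via ARCH_ENABLE_HUGEPAGE_MIGRATION */
    static inline int hugepage_migration_support(struct hstate *h)
    {
    #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
    	return huge_page_shift(h) == PMD_SHIFT;
    #else
    	return 0;
    #endif
    }

    /* include/linux/mempolicy.h: vma_migratable() now refuses hugetlb
     * VMAs outright on archs without hugepage migration, so
     * sys_move_pages() never reaches follow_page(FOLL_GET) on them */
    static inline int vma_migratable(struct vm_area_struct *vma)
    {
    	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
    		return 0;
    #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
    	if (vma->vm_flags & VM_HUGETLB)
    		return 0;
    #endif
    	/* ... remaining highest-zone checks unchanged ... */
    	return 1;
    }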

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: <stable@vger.kernel.org>	[3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7f39dda9

arch/arm/mm/hugetlbpage.c
@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}

arch/arm64/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
 #endif
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);

arch/ia64/mm/hugetlbpage.c
@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 0;
-}
-
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {

arch/metag/mm/hugetlbpage.c
@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

arch/mips/mm/hugetlbpage.c
@@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
 	return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)

arch/powerpc/mm/hugetlbpage.c
@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
 	 */
 	return ((pgd_val(pgd) & 0x3) != 0x0);
 }
-
-int pmd_huge_support(void)
-{
-	return 1;
-}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
 {
 	return 0;
 }
-
-int pmd_huge_support(void)
-{
-	return 0;
-}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)

arch/s390/mm/hugetlbpage.c
@@ -220,11 +220,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmdp, int write)
 {

arch/sh/mm/hugetlbpage.c
@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 0;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

arch/sparc/mm/hugetlbpage.c
@@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }
 
-int pmd_huge_support(void)
-{
-	return 0;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

arch/tile/mm/hugetlbpage.c
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			     pmd_t *pmd, int write)
 {

arch/x86/Kconfig
@@ -1873,6 +1873,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 	def_bool y
 	depends on X86_64 || X86_PAE
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+	def_bool y
+	depends on X86_64 && HUGETLB_PAGE && MIGRATION
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER

arch/x86/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	return NULL;
 }
 
-int pmd_huge_support(void)
-{
-	return 0;
-}
-
 #else
 
 struct page *
@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
 {
 	return !!(pud_val(pud) & _PAGE_PSE);
 }
 
-int pmd_huge_support(void)
-{
-	return 1;
-}
-
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE

include/linux/hugetlb.h
@@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
 				     unsigned long end_pfn);
 
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
 static inline int hugepage_migration_support(struct hstate *h)
 {
-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	return huge_page_shift(h) == PMD_SHIFT;
+#else
+	return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
 	return page->index;
 }
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
-#define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,

include/linux/mempolicy.h
@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	if (vma->vm_flags & VM_HUGETLB)
+		return 0;
+#endif
+
 	/*
 	 * Migration allocates pages in the highest zone. If we cannot
 	 * do so then migration (at least from node to node) is not

mm/Kconfig
@@ -267,6 +267,9 @@ config MIGRATION
 	  pages as migration can relocate pages to satisfy a huge page
 	  allocation instead of reclaiming.
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+	boolean
+
 config PHYS_ADDR_T_64BIT
 	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT