Commit a15d5146 authored by Naoya Horiguchi, committed by Greg Kroah-Hartman

mm/hugetlb: reduce arch dependent code around follow_huge_*

commit 61f77eda upstream.

Currently we have many duplicates in definitions around
follow_huge_addr(), follow_huge_pmd(), and follow_huge_pud(), so this
patch tries to remove them.  The basic idea is to put the default
implementation for these functions in mm/hugetlb.c as weak symbols
(regardless of CONFIG_ARCH_WANT_GENERAL_HUGETLB), and to implement
arch-specific code only when the arch needs it.
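As background, the weak-symbol mechanism itself works as in the minimal
standalone sketch below (this is not kernel code; the function name
arch_hook() and the printed values are invented for illustration, and the
kernel's __weak is simply a macro for the same GCC attribute):

/* generic.c: a library-style default, marked weak */
#include <stdio.h>

__attribute__((weak)) int arch_hook(void)
{
	return 0;		/* generic default behavior */
}

int main(void)
{
	/*
	 * Prints 0 when only this weak default is in the link.  If another
	 * object file defines a non-weak arch_hook(), the linker silently
	 * prefers that strong definition -- no #ifdef or Makefile logic is
	 * needed, which is what the weak defaults in mm/hugetlb.c rely on.
	 */
	printf("arch_hook() = %d\n", arch_hook());
	return 0;
}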

For follow_huge_addr(), only powerpc and ia64 have their own
implementation; in all other architectures this function just returns
ERR_PTR(-EINVAL).  So this patch makes returning ERR_PTR(-EINVAL) the
default.

As for follow_huge_(pmd|pud)(), if (pmd|pud)_huge() is implemented to
always return 0 in your architecture (as in ia64 or sparc), the function
is never called (the callsite is optimized away), no matter how it is
implemented.  So such architectures don't need an arch-specific
implementation.
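
To see why the callsite disappears, consider the standalone sketch below
(the names are made-up stand-ins, not the real follow_page_mask() path):
because the predicate is a compile-time constant 0, the compiler folds
the guarded branch away with optimization enabled, as the kernel always
is, so the guarded function's implementation is irrelevant.

#include <stdio.h>

/* stand-ins for pmd_huge() and follow_huge_pmd(); names are illustrative */
static inline int fake_pmd_huge(void)  { return 0; }	/* arch: no PMD hugepages */
static int fake_follow_huge_pmd(void)  { return 42; }	/* return value never matters */

int main(void)
{
	int val = -1;

	/*
	 * fake_pmd_huge() is a constant 0, so with optimization enabled
	 * the compiler removes this branch entirely; fake_follow_huge_pmd()
	 * is never called at run time.
	 */
	if (fake_pmd_huge())
		val = fake_follow_huge_pmd();

	printf("%d\n", val);	/* always prints -1 */
	return 0;
}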

In some architectures (like mips, s390 and tile), the current
arch-specific follow_huge_(pmd|pud)() is effectively identical to the
common code, so this patch lets those architectures use the common code.

One exception is metag, where pmd_huge() can return non-zero but
follow_huge_pmd() is expected to always return NULL.  This means that we
need an arch-specific implementation which returns NULL.  This behavior
looks strange to me (because a non-zero pmd_huge() implies that the
architecture supports PMD-based hugepages, so follow_huge_pmd()
can/should return some relevant value), but that's beyond this cleanup
patch, so let's keep it.

Justification of non-trivial changes:
- in s390, follow_huge_pmd() checks !MACHINE_HAS_HPAGE first, and this
  patch removes the check.  This is OK because we can assume
  MACHINE_HAS_HPAGE is true whenever follow_huge_pmd() can be called
  (note that pmd_huge() has the same check and always returns 0 for
  !MACHINE_HAS_HPAGE).
- in s390 and mips, HPAGE_MASK is used instead of PMD_MASK as done in the
  common code.  This patch forces these archs to use PMD_MASK, but that's
  OK because the two masks are identical on both archs.
  In s390, HPAGE_SHIFT and PMD_SHIFT are both 20.
  In mips, HPAGE_SHIFT is defined as (PAGE_SHIFT + PAGE_SHIFT - 3) and
  PMD_SHIFT is defined as (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3), but
  PTE_ORDER is always 0, so the two are identical (a quick arithmetic
  check follows below).
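
The check below spells out that arithmetic (a standalone sketch;
PAGE_SHIFT = 12 assumes the common 4K page configuration, and the mask
definitions follow the usual ~(size - 1) pattern rather than the exact
kernel headers):

#include <assert.h>

#define PAGE_SHIFT	12	/* assumption: 4K base pages */
#define PTE_ORDER	0	/* always 0 on mips, per the text above */
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

int main(void)
{
	/*
	 * With PTE_ORDER == 0 the two shifts are equal (21 here), so the
	 * derived masks are bit-for-bit identical and switching from
	 * HPAGE_MASK to PMD_MASK changes nothing.
	 */
	assert(HPAGE_SHIFT == PMD_SHIFT);
	assert(HPAGE_MASK == PMD_MASK);
	return 0;
}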

[n-horiguchi@ah.jp.nec.com: resolve conflict to apply to v3.19.1]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Steve Capper <steve.capper@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b52d8696
@@ -36,12 +36,6 @@
  * of type casting from pmd_t * to pte_t *.
  */

-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pud_huge(pud_t pud)
 {
 	return 0;
@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 }
 #endif

-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return !(pmd_val(pmd) & PMD_TABLE_BIT);
@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
 	return 0;
 }

-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
-{
-	return NULL;
-}
-
 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }

-struct page *follow_huge_addr(struct mm_struct *mm,
-			      unsigned long address, int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return pmd_page_shift(pmd) > PAGE_SHIFT;
@@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }

-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
@@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
 {
 	return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int write)
-{
-	struct page *page;
-
-	page = pte_page(*(pte_t *)pmd);
-	if (page)
-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
-	return page;
-}
@@ -714,6 +714,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }

+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+		pud_t *pud, int write)
+{
+	BUG();
+	return NULL;
+}
+
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)
 {
@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }

-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	if (!MACHINE_HAS_HPAGE)
@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
 {
 	return 0;
 }
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmdp, int write)
-{
-	struct page *page;
-
-	if (!MACHINE_HAS_HPAGE)
-		return NULL;
-
-	page = pmd_page(*pmdp);
-	if (page)
-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
-	return page;
-}
@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }

-struct page *follow_huge_addr(struct mm_struct *mm,
-			      unsigned long address, int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return 0;
@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
 {
 	return 0;
 }
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int write)
-{
-	return NULL;
-}
@@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }

-struct page *follow_huge_addr(struct mm_struct *mm,
-			      unsigned long address, int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return 0;
@@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
 {
 	return 0;
 }
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int write)
-{
-	return NULL;
-}
@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return NULL;
 }

-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-			      int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int write)
-{
-	struct page *page;
-
-	page = pte_page(*(pte_t *)pmd);
-	if (page)
-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-	return page;
-}
-
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
-			     pud_t *pud, int write)
-{
-	struct page *page;
-
-	page = pte_page(*(pte_t *)pud);
-	if (page)
-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
-	return page;
-}
-
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
 	return 0;
@@ -52,20 +52,8 @@ int pud_huge(pud_t pud)
 	return 0;
 }

-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int write)
-{
-	return NULL;
-}
-
 #else

-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 /*
  * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
  * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
@@ -3700,7 +3700,20 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }

-struct page *
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+/*
+ * These functions are overwritable if your architecture needs its own
+ * behavior.
+ */
+struct page * __weak
+follow_huge_addr(struct mm_struct *mm, unsigned long address,
+			      int write)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+struct page * __weak
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		pmd_t *pmd, int write)
 {
@@ -3714,7 +3727,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return page;
 }

-struct page *
+struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
 		pud_t *pud, int write)
 {
@@ -3726,19 +3739,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	return page;
 }

-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-
-/* Can be overriden by architectures */
-struct page * __weak
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
-	       pud_t *pud, int write)
-{
-	BUG();
-	return NULL;
-}
-
-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-
 #ifdef CONFIG_MEMORY_FAILURE

 /* Should be called in hugetlb_lock */