Commit ab57bd75 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Move get_unmapped_area functions to slice.c

hugetlb_get_unmapped_area() is now identical to the
generic version if only RADIX is enabled, so move it
to slice.c and let it fallback on the generic one
when HASH MMU is not compiled in.

Do the same with arch_get_unmapped_area() and
arch_get_unmapped_area_topdown().
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b5d9c124e82889e0cb115c150915a0c0d84eb960.1649523076.git.christophe.leroy@csgroup.eu
parent 1a0261fd
...@@ -4,12 +4,6 @@ ...@@ -4,12 +4,6 @@
#include <asm/page.h> #include <asm/page.h>
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
* Page size definition * Page size definition
......
...@@ -4,6 +4,12 @@ ...@@ -4,6 +4,12 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define SLICE_LOW_SHIFT 28 #define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ul) #define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) #define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
......
...@@ -639,6 +639,32 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, ...@@ -639,6 +639,32 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
} }
EXPORT_SYMBOL_GPL(slice_get_unmapped_area); EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
/*
 * Bottom-up mmap() address search.  With the Radix MMU the generic
 * allocator is used directly; otherwise the request goes through the
 * slice mask machinery using the context's user page size.
 */
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	if (!radix_enabled())
		return slice_get_unmapped_area(addr, len, flags,
					       mm_ctx_user_psize(&current->mm->context), 0);

	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
/*
 * Top-down mmap() address search: generic path on Radix, slice
 * allocator otherwise (final argument 1 selects top-down search).
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	if (!radix_enabled())
		return slice_get_unmapped_area(addr0, len, flags,
					       mm_ctx_user_psize(&current->mm->context), 1);

	return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
}
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{ {
unsigned char *psizes; unsigned char *psizes;
...@@ -766,4 +792,20 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) ...@@ -766,4 +792,20 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start)); return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
} }
/* Map a hugetlbfs file's huge page shift to an MMU page size index. */
static int file_to_psize(struct file *file)
{
	return shift_to_mmu_psize(huge_page_shift(hstate_file(file)));
}
/*
 * Hugetlb mmap() address search: generic allocator on Radix, slice
 * allocator (top-down, psize taken from the hugetlbfs file) otherwise.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (!radix_enabled())
		return slice_get_unmapped_area(addr, len, flags,
					       file_to_psize(file), 1);

	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif #endif
...@@ -542,27 +542,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma, ...@@ -542,27 +542,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
return page; return page;
} }
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/* Map a hugetlbfs file's huge page shift to an MMU page size index. */
static inline int file_to_psize(struct file *file)
{
struct hstate *hstate = hstate_file(file);
return shift_to_mmu_psize(huge_page_shift(hstate));
}
/*
 * Hugetlb mmap() address search (pre-move version being deleted by this
 * commit): generic allocator on Radix, slice allocator when the Hash MMU
 * is compiled in; BUG() if neither path is available at this point.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
if (radix_enabled())
return generic_hugetlb_get_unmapped_area(file, addr, len,
pgoff, flags);
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Hash MMU: top-down slice search with the file's huge page size. */
return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
#endif
/* No usable MMU configuration reached here. */
BUG();
}
#endif
bool __init arch_hugetlb_valid_size(unsigned long size) bool __init arch_hugetlb_valid_size(unsigned long size)
{ {
int shift = __ffs(size); int shift = __ffs(size);
......
...@@ -80,42 +80,6 @@ static inline unsigned long mmap_base(unsigned long rnd, ...@@ -80,42 +80,6 @@ static inline unsigned long mmap_base(unsigned long rnd,
return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd); return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
} }
#ifdef HAVE_ARCH_UNMAPPED_AREA
/*
 * Bottom-up mmap() address search (pre-move version being deleted by
 * this commit): generic path on Radix, slice allocator on Hash MMU,
 * BUG() when the Hash MMU is not compiled in.
 */
unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
if (radix_enabled())
return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Final argument 0 selects bottom-up slice search. */
return slice_get_unmapped_area(addr, len, flags,
mm_ctx_user_psize(&current->mm->context), 0);
#else
BUG();
#endif
}
/*
 * Top-down counterpart of the above; final argument 1 selects the
 * top-down slice search.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long addr0,
const unsigned long len,
const unsigned long pgoff,
const unsigned long flags)
{
if (radix_enabled())
return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
#ifdef CONFIG_PPC_64S_HASH_MMU
return slice_get_unmapped_area(addr0, len, flags,
mm_ctx_user_psize(&current->mm->context), 1);
#else
BUG();
#endif
}
#endif /* HAVE_ARCH_UNMAPPED_AREA */
/* /*
* This function, called very early during the creation of a new * This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use: * process VM image, sets up which VM layout function to use:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment