Commit 014a32b3 authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman

powerpc/mm/slice: remove radix calls to the slice code

This is a tidy up which removes radix MMU calls into the slice
code.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent d262bd5a
...@@ -89,17 +89,17 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, ...@@ -89,17 +89,17 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
void flush_dcache_icache_hugepage(struct page *page); void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES) int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len); unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm, static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long addr,
unsigned long len) unsigned long len)
{ {
if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
return slice_is_hugepage_only_range(mm, addr, len);
return 0; return 0;
} }
#endif
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
pte_t pte); pte_t pte);
......
...@@ -565,10 +565,12 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -565,10 +565,12 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{ {
#ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC_MM_SLICES
unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
/* With radix we don't use slice, so derive it from vma*/ /* With radix we don't use slice, so derive it from vma*/
if (!radix_enabled()) if (!radix_enabled()) {
unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
return 1UL << mmu_psize_to_shift(psize); return 1UL << mmu_psize_to_shift(psize);
}
#endif #endif
if (!is_vm_hugetlb_page(vma)) if (!is_vm_hugetlb_page(vma))
return PAGE_SIZE; return PAGE_SIZE;
......
...@@ -686,16 +686,8 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) ...@@ -686,16 +686,8 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
unsigned char *psizes; unsigned char *psizes;
int index, mask_index; int index, mask_index;
/* VM_BUG_ON(radix_enabled());
* Radix doesn't use slice, but can get enabled along with MMU_SLICE
*/
if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
return MMU_PAGE_64K;
#else
return MMU_PAGE_4K;
#endif
}
if (addr < SLICE_LOW_TOP) { if (addr < SLICE_LOW_TOP) {
psizes = mm->context.low_slices_psize; psizes = mm->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr); index = GET_LOW_SLICE_INDEX(addr);
...@@ -778,14 +770,13 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, ...@@ -778,14 +770,13 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
* for now as we only use slices with hugetlbfs enabled. This should * for now as we only use slices with hugetlbfs enabled. This should
* be fixed as the generic code gets fixed. * be fixed as the generic code gets fixed.
*/ */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len) unsigned long len)
{ {
const struct slice_mask *maskp; const struct slice_mask *maskp;
unsigned int psize = mm->context.user_psize; unsigned int psize = mm->context.user_psize;
if (radix_enabled()) VM_BUG_ON(radix_enabled());
return 0;
maskp = slice_mask_for_size(mm, psize); maskp = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES #ifdef CONFIG_PPC_64K_PAGES
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment