Commit 4c1616ef authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: move FSL_BOOK3E version of update_mmu_cache()

Move the FSL_BOOK3E version of update_mmu_cache() to the same place
as book3e_hugetlb_preload(), since update_mmu_cache() is the only
user of book3e_hugetlb_preload().
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4d69fdc86df9c74adc71a60331a86f6afb8b5e9e.1565933217.git.christophe.leroy@c-s.fr
parent d9642117
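
Taken together, the hunks below leave the book3e hugetlb code looking roughly like the sketch that follows. This is only a sketch assembled from this diff: the body of book3e_hugetlb_preload() is elided and the rest of the file (includes, other helpers) is omitted, so it is not compilable on its own.

static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	/* body elided: sets up mas1/mas2/mas7_3 and preloads the TLB entry */
}

/*
 * Called at the end of handling a user page fault, once the fault has been
 * handled by updating a PTE in the linux page tables.  Must always be called
 * with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}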
...
@@ -31,9 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 	return 0;
 }
 
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
-			    pte_t pte);
-
 #define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
...
...
@@ -458,14 +458,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	hash_preload(vma->vm_mm, address, is_exec, trap);
 }
 #endif /* CONFIG_PPC_BOOK3S */
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
-{
-	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma, address, *ptep);
-}
-#endif
 
 /*
  * System memory should not be in /proc/iomem but various tools expect it
...
...
@@ -122,8 +122,8 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	return found;
 }
 
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
-			    pte_t pte)
+static void
+book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;
...
@@ -183,6 +183,18 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 	local_irq_restore(flags);
 }
 
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	if (is_vm_hugetlb_page(vma))
+		book3e_hugetlb_preload(vma, address, *ptep);
+}
+
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	struct hstate *hstate = hstate_file(vma->vm_file);
...