Commit 5491ae7b authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range

Some archs, like ppc64, need to do special things when flushing the TLB for
hugepages. Add a new helper to flush a hugetlb TLB range. This helps us
avoid flushing the entire TLB mapping for the PID.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent fbfa26d8
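
Note: the patch follows the usual kernel opt-in pattern: generic code supplies a default, and an arch header overrides it by defining a guard macro. Below is a minimal standalone sketch of that pattern with hypothetical names (not the kernel's), compiled as ordinary userspace C:

#include <stdio.h>

/* "arch" side: opt in by defining the guard and providing the function */
#define __HAVE_ARCH_FLUSH_RANGE
static inline void flush_range(unsigned long start, unsigned long end)
{
	printf("arch-specific flush of [%#lx, %#lx)\n", start, end);
}

/* "generic" side: fall back to a plain flush when the arch did not opt in */
static inline void generic_flush_range(unsigned long start, unsigned long end)
{
	printf("generic flush of [%#lx, %#lx)\n", start, end);
}

#ifndef __HAVE_ARCH_FLUSH_RANGE
#define flush_range(start, end) generic_flush_range(start, end)
#endif

int main(void)
{
	flush_range(0x1000UL, 0x2000UL);	/* the arch version wins here */
	return 0;
}
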
@@ -10,6 +10,8 @@ static inline int mmu_get_ap(int psize)
 	return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 					 unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
...
@@ -16,6 +16,16 @@ static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
 	return hash__flush_tlb_range(vma, start, end);
 }
 
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
...
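
The header above picks the flush implementation at runtime via radix_enabled(), which on book3s/64 tests an MMU feature fixed at boot. A minimal userspace sketch of the same dispatch shape, with hypothetical names and a plain flag standing in for the feature check:

#include <stdbool.h>
#include <stdio.h>

static bool radix_mode;	/* stand-in for the kernel's boot-time MMU feature bit */

static bool radix_enabled(void)
{
	return radix_mode;
}

static void radix_flush(unsigned long start, unsigned long end)
{
	printf("radix flush [%#lx, %#lx)\n", start, end);
}

static void hash_flush(unsigned long start, unsigned long end)
{
	printf("hash flush [%#lx, %#lx)\n", start, end);
}

/* same shape as flush_hugetlb_tlb_range() above: one entry point,
 * branching on the MMU type detected at boot */
static void flush_hugetlb_range(unsigned long start, unsigned long end)
{
	if (radix_enabled()) {
		radix_flush(start, end);
		return;
	}
	hash_flush(start, end);
}

int main(void)
{
	radix_mode = true;
	flush_hugetlb_range(0x1000UL, 0x3000UL);	/* takes the radix path */
	return 0;
}
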
@@ -25,6 +25,16 @@ void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long v
 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				    unsigned long end)
+{
+	int psize;
+	struct hstate *hstate = hstate_file(vma->vm_file);
+
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
  * A variant of hugetlb_get_unmapped_area doing topdown search
  * FIXME!! should we do as x86 does or non hugetlb area does ?
...
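
radix__flush_hugetlb_tlb_range() looks up the hstate's page-size index and hands the range plus that index to radix__flush_tlb_range_psize(), so only entries of the right size are targeted instead of the whole PID mapping. A hedged, illustrative-only sketch of mapping a huge page shift to a size index, loosely modeled on hstate_get_psize() (the real kernel consults mmu_psize_defs[] and its supported sizes):

#include <stdio.h>

/* hypothetical size indices; the kernel's are the MMU_PAGE_* values
 * backing mmu_psize_defs[] */
enum psize { PSIZE_4K, PSIZE_64K, PSIZE_2M, PSIZE_1G, PSIZE_BAD };

static enum psize shift_to_psize(unsigned int shift)
{
	switch (shift) {
	case 12: return PSIZE_4K;
	case 16: return PSIZE_64K;
	case 21: return PSIZE_2M;	/* 2M huge pages */
	case 30: return PSIZE_1G;	/* 1G huge pages */
	default: return PSIZE_BAD;	/* unsupported page size */
	}
}

int main(void)
{
	/* an hstate for 2M huge pages would carry shift 21 */
	printf("shift 21 -> psize index %d\n", shift_to_psize(21));
	return 0;
}
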
@@ -3938,6 +3938,14 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
...
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
...
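
With the guard left undefined, the new call in hugetlb_change_protection() degrades to the old flush_tlb_range(), so architectures that don't opt in see no behavioral change. A small standalone sketch of that fallback expansion, again with placeholder argument lists:

#include <stdio.h>

static void flush_tlb_range(unsigned long start, unsigned long end)
{
	printf("plain range flush [%#lx, %#lx)\n", start, end);
}

/* no __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE defined in this translation unit... */
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
#define flush_hugetlb_tlb_range(start, end) flush_tlb_range(start, end)
#endif

int main(void)
{
	/* ...so this call expands to flush_tlb_range(0x1000, 0x5000) */
	flush_hugetlb_tlb_range(0x1000UL, 0x5000UL);
	return 0;
}
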