Commit 12ebc158 authored by Vineet Gupta

mm,thp: introduce flush_pmd_tlb_range

ARCHes with special requirements for evicting THP backing TLB entries
can implement this.

Even otherwise, it can help optimize TLB flushes in the THP regime:
a stock flush_tlb_range() typically has an optimization to nuke the
entire TLB when the flush span exceeds a certain threshold, which will
likely be true for a single huge page. Thus a single THP flush would
invalidate the entire TLB, which is not desirable.

e.g. see arch/arc: flush_pmd_tlb_range
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20151009100816.GC7873@node
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent bd5e88ad
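
For background on the rationale above, the following is an illustrative
sketch (not code from this commit) of the threshold heuristic a stock
flush_tlb_range() commonly applies. FLUSH_ALL_THRESHOLD, tlb_flush_all()
and tlb_flush_one() are made-up names standing in for an architecture's
real primitives.

/* Illustrative sketch of a typical stock flush_tlb_range() heuristic. */
#define FLUSH_ALL_THRESHOLD	(32 * PAGE_SIZE)	/* made-up value */

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long addr;

	/*
	 * Past a certain span, per-page eviction is assumed costlier
	 * than a full-TLB flush. HPAGE_PMD_SIZE (e.g. 2MB) usually
	 * exceeds that span, so flushing a single THP this way nukes
	 * the whole TLB, which is the behaviour this commit lets
	 * architectures avoid.
	 */
	if (end - start > FLUSH_ALL_THRESHOLD) {
		tlb_flush_all();		/* hypothetical helper */
		return;
	}

	for (addr = start; addr < end; addr += PAGE_SIZE)
		tlb_flush_one(vma, addr);	/* hypothetical helper */
}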
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
 	 * here). But it is generally safer to never allow
 	 * small and huge TLB entries for the same virtual
 	 * address to be loaded simultaneously. So instead of
-	 * doing "pmd_populate(); flush_tlb_range();" we first
+	 * doing "pmd_populate(); flush_pmd_tlb_range();" we first
 	 * mark the current pmd notpresent (atomically because
 	 * here the pmd_trans_huge and pmd_trans_splitting
 	 * must remain set at all times on the pmd until the
...
@@ -84,6 +84,20 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. Stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a certain threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pmd_t *pmdp,
@@ -93,7 +107,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	if (changed) {
 		set_pmd_at(vma->vm_mm, address, pmdp, entry);
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
@@ -107,7 +121,7 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	young = pmdp_test_and_clear_young(vma, address, pmdp);
 	if (young)
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return young;
 }
 #endif
@@ -120,7 +134,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(!pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif
@@ -133,7 +147,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 	/* tlb flush only to serialize against gup-fast */
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif
@@ -179,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 {
 	pmd_t entry = *pmdp;
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
 #endif
@@ -196,7 +210,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif
...
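
For illustration, a minimal sketch of how an architecture might override
the generic fallback above, loosely inspired by the arch/arc version the
comment points to. local_flush_huge_tlb_page() is a hypothetical helper,
not the actual ARC implementation.

/* Sketch of a hypothetical arch override (not the real arch/arc code). */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	unsigned long addr;

	/*
	 * Evict only the huge-page TLB entries covering the range,
	 * stepping by HPAGE_PMD_SIZE, instead of deferring to
	 * flush_tlb_range() and risking a full-TLB nuke.
	 */
	for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
		local_flush_huge_tlb_page(vma, addr);	/* hypothetical */
}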