Commit 15a25b2e authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/thp: split out pmd collapse flush into separate functions

Architectures like ppc64 [1] need to do special things while clearing the pmd
before a collapse.  For them this operation is significantly different from a
normal hugepage pte clear.  Hence add a separate function to clear the pmd
before collapse.  After this patch the pmdp_* functions operate only on
hugepage ptes, not on regular pmd_t values pointing to a page table.

[1] ppc64 needs to invalidate all the normal page pte mappings we have
already inserted in the hardware hash page table.  But before doing that we
need to make sure there are no parallel hash page table inserts going on.
So we need to do a kick_all_cpus_sync() before flushing the older hash
table entries.  Moving this to a separate function captures these details
and documents how the operation differs from a hugepage pte clear.
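To make the ordering concrete, here is a minimal sketch of what the body of
the new ppc64 pmdp_collapse_flush() boils down to, per the description above
(the kick_all_cpus_sync() call sits in a context region the pgtable_64.c diff
below does not expand):

	pmd = *pmdp;
	pmd_clear(pmdp);	/* parallel faults now see a none pmd */
	kick_all_cpus_sync();	/* wait for in-flight hash_page() inserts */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);	/* flush old hash entries */
	return pmd;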

This patch is a cleanup and only moves code around for clarity.  There
should be no change in functionality.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 97f0b134
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -592,6 +592,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 extern void pmdp_splitting_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
 
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp);
+#define pmdp_collapse_flush pmdp_collapse_flush
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				       pgtable_t pgtable);
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -560,12 +560,19 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	pmd_t pmd;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	if (pmd_trans_huge(*pmdp)) {
-		pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
-	} else {
-		/*
-		 * khugepaged calls this for normal pmd
-		 */
+	VM_BUG_ON(!pmd_trans_huge(*pmdp));
+	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+	return pmd;
+}
+
+pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	pmd_t pmd;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	VM_BUG_ON(pmd_trans_huge(*pmdp));
+
 	pmd = *pmdp;
 	pmd_clear(pmdp);
 	/*
@@ -594,7 +601,6 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 	 * the old content.
 	 */
 	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
-	}
 	return pmd;
 }
 
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -189,6 +189,27 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
 #endif
 
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmdp)
+{
+	return pmdp_clear_flush(vma, address, pmdp);
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmdp)
+{
+	BUILD_BUG();
+	return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				       pgtable_t pgtable);
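The self-referential #define pmdp_collapse_flush pmdp_collapse_flush idiom is
what the #ifndef above keys on: an architecture overrides the generic fallback
by declaring its own version and defining the macro before this header is
included, exactly as the ppc64 header hunk earlier in this commit does.  As a
sketch, a hypothetical arch header needs only:

	extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp);
	#define pmdp_collapse_flush pmdp_collapse_flush	/* suppress generic version */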
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2499,7 +2499,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * huge and small TLB entries for the same virtual address
 	 * to avoid the risk of CPU bugs in that area.
 	 */
-	_pmd = pmdp_clear_flush(vma, address, pmd);
+	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
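For context, an abridged sketch of the call site in collapse_huge_page()
(surrounding locking as in mm/huge_memory.c of this era; the intervening
comment is the one visible in the hunk above):

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd);	/* serialize against faults on this pmd */
	/* ... comment shown in the hunk above ... */
	_pmd = pmdp_collapse_flush(vma, address, pmd);	/* was pmdp_clear_flush() */
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);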