Commit fc047955 authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt

powerpc/thp: Handle combo pages in invalidate

If we changed the base page size of the segment, either via sub_page_protect
or via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash
table entries. Instead, we do a lazy hash page table flush for all mapped
pages in the demoted segment; this happens when we next handle a hash page
fault for these pages.

We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the pte,
that implies we could still have older 64K hash pte entries in the hash
page table, and we need to invalidate those entries.

Use _PAGE_COMBO to determine the page size with which we should
invalidate the hash table entries on unmap.
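
To make the fix concrete, here is a minimal C sketch (not the kernel code
itself) of how the invalidation size is now derived from the old PMD value
rather than from the slice map. pick_invalidate_psize() is a hypothetical
helper name used for illustration; _PAGE_COMBO, MMU_PAGE_4K and MMU_PAGE_64K
are the kernel's own symbols:

	/*
	 * After a segment demotion the slice map already reports a 4K
	 * base page size, so it cannot tell us which size the stale
	 * hash PTEs were inserted with. The old PMD value can: if
	 * _PAGE_COMBO is set the hugepage was backed by 4K hash PTEs,
	 * otherwise by 64K ones.
	 */
	static int pick_invalidate_psize(unsigned long old_pmd)
	{
		if (old_pmd & _PAGE_COMBO)
			return MMU_PAGE_4K;	/* invalidate as 4K entries */
		return MMU_PAGE_64K;		/* invalidate as 64K entries */
	}

This mirrors the psize selection added to hpte_do_hugepage_flush() in the
diff below.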

CC: <stable@vger.kernel.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 629149fa
@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
 }
 extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-				   pmd_t *pmdp);
+				   pmd_t *pmdp, unsigned long old_pmd);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
 extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
...
@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
 	*pmdp = __pmd((old & ~clr) | set);
 #endif
 	if (old & _PAGE_HASHPTE)
-		hpte_do_hugepage_flush(mm, addr, pmdp);
+		hpte_do_hugepage_flush(mm, addr, pmdp, old);
 	return old;
 }
@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	if (!(old & _PAGE_SPLITTING)) {
 		/* We need to flush the hpte */
 		if (old & _PAGE_HASHPTE)
-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
+			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
 	}
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
@@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
  * need to be flushed.
  */
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-			    pmd_t *pmdp)
+			    pmd_t *pmdp, unsigned long old_pmd)
 {
 	int ssize, i;
 	unsigned long s_addr;
@@ -746,7 +746,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 		return;
 
 	/* get the base page size,vsid and segment size */
+#ifdef CONFIG_DEBUG_VM
 	psize = get_slice_psize(mm, s_addr);
+	BUG_ON(psize == MMU_PAGE_16M);
+#endif
+	if (old_pmd & _PAGE_COMBO)
+		psize = MMU_PAGE_4K;
+	else
+		psize = MMU_PAGE_64K;
+
 	if (!is_kernel_addr(s_addr)) {
 		ssize = user_segment_size(s_addr);
 		vsid = get_vsid(mm->context.id, s_addr, ssize);
...
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
-			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
 		else
 			hpte_need_flush(mm, start, ptep, pte, 0);
 	}