Commit 629149fa authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Benjamin Herrenschmidt

powerpc/thp: Invalidate old 64K based hash page mapping before insert of 4k pte

If we change the base page size of a segment, either via sub_page_protect
or via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash
table entries. Instead we do a lazy hash page table flush for all mapped
pages in the demoted segment. This happens when we handle a hash page
fault for these pages.

We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
pte is backed by a 4K hash pte. If we find _PAGE_COMBO not set on the pte,
that implies that we could possibly have older 64K hash pte entries in
the hash page table, and we need to invalidate those entries.

Handle this correctly for 16M (huge) pages.

CC: <stable@vger.kernel.org>
Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: default avatarBenjamin Herrenschmidt <benh@kernel.crashing.org>
parent fa1f8ae8
...@@ -18,6 +18,57 @@ ...@@ -18,6 +18,57 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/machdep.h> #include <asm/machdep.h>
/*
 * Invalidate every valid hash-PTE entry backing one huge-page PMD whose
 * per-PMD hash slot array is still populated with entries inserted at the
 * old base page size @psize. Used after a segment demotion (e.g. 64K -> 4K
 * base page size), where demote_segment does not flush the hash table.
 *
 * @vsid:  virtual segment id of the mapping
 * @addr:  any address within the huge page (aligned down internally)
 * @pmdp:  the huge-page PMD whose slot array is walked
 * @psize: the OLD base page size the stale hash entries were inserted with
 * @ssize: segment size
 */
static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize)
{
	int i, max_hpte_count, valid;
	unsigned long s_addr;
	unsigned char *hpte_slot_array;
	unsigned long hidx, shift, vpn, hash, slot;

	/* Start address of the huge page the PMD maps. */
	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * If we try to do a huge PTE update after a withdraw is done,
	 * we will find the slot array below NULL. This happens when we
	 * do split_huge_page_pmd.
	 */
	if (!hpte_slot_array)
		return;

	/* Prefer the platform's bulk hugepage invalidate hook if present. */
	if (ppc_md.hugepage_invalidate)
		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
						  psize, ssize);
	/*
	 * No bulk hpte removal support, invalidate each entry
	 */
	shift = mmu_psize_defs[psize].shift;
	/* Number of base-page-size hash slots covering the huge page. */
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entry:
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* Recompute the vpn for the i-th base page of the huge page. */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		/* Secondary-group entries use the complemented hash. */
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		/*
		 * Base page size is @psize, actual (mapping) page size is
		 * 16M since this is a hugepage hash entry; local flag is 0
		 * (global invalidate).
		 */
		ppc_md.hpte_invalidate(slot, vpn, psize,
				       MMU_PAGE_16M, ssize, 0);
	}
}
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
pmd_t *pmdp, unsigned long trap, int local, int ssize, pmd_t *pmdp, unsigned long trap, int local, int ssize,
unsigned int psize) unsigned int psize)
...@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
vpn = hpt_vpn(ea, vsid, ssize); vpn = hpt_vpn(ea, vsid, ssize);
hash = hpt_hash(vpn, shift, ssize); hash = hpt_hash(vpn, shift, ssize);
hpte_slot_array = get_hpte_slot_array(pmdp); hpte_slot_array = get_hpte_slot_array(pmdp);
if (psize == MMU_PAGE_4K) {
/*
* invalidate the old hpte entry if we have that mapped via 64K
* base page size. This is because demote_segment won't flush
* hash page table entries.
*/
if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
}
valid = hpte_valid(hpte_slot_array, index); valid = hpte_valid(hpte_slot_array, index);
if (valid) { if (valid) {
...@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* safely update this here. * safely update this here.
*/ */
valid = 0; valid = 0;
new_pmd &= ~_PAGE_HPTEFLAGS;
hpte_slot_array[index] = 0; hpte_slot_array[index] = 0;
} else }
/* clear the busy bits and set the hash pte bits */
new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
} }
if (!valid) { if (!valid) {
...@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
/* insert new entry */ /* insert new entry */
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
repeat: new_pmd |= _PAGE_HASHPTE;
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* clear the busy bits and set the hash pte bits */
new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
/* Add in WIMG bits */ /* Add in WIMG bits */
rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
...@@ -132,6 +185,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -132,6 +185,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* enable the memory coherence always * enable the memory coherence always
*/ */
rflags |= HPTE_R_M; rflags |= HPTE_R_M;
repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
...@@ -171,6 +226,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -171,6 +226,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
*/ */
mark_hpte_slot_valid(hpte_slot_array, index, slot); mark_hpte_slot_valid(hpte_slot_array, index, slot);
} }
/*
* Mark the pte with _PAGE_COMBO, if we are trying to hash it with
* base page size 4k.
*/
if (psize == MMU_PAGE_4K)
new_pmd |= _PAGE_COMBO;
/* /*
* The hpte valid is stored in the pgtable whose address is in the * The hpte valid is stored in the pgtable whose address is in the
* second half of the PMD. Order this against clearing of the busy bit in * second half of the PMD. Order this against clearing of the busy bit in
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment