Commit 75646c48 authored by Aneesh Kumar K.V, committed by Michael Ellerman

arch/powerpc/mm/hash: validate the pte entries before handling the hash fault

Make sure we are operating on THP and hugetlb entries in the respective hash
fault handling routines.

No functional change in this patch. If the page table was walked wrongly before,
the access will be retried.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ae28f17b
@@ -51,6 +51,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		new_pmd |= _PAGE_DIRTY;
	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
	/*
	 * Make sure this is thp or devmap entry
	 */
	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
		return 0;
	rflags = htab_convert_pte_flags(new_pmd);
#if 0
...
@@ -62,6 +62,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		new_pte |= _PAGE_DIRTY;
	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
	/* Make sure this is a hugetlb entry */
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;
	rflags = htab_convert_pte_flags(new_pte);
	if (unlikely(mmu_psize == MMU_PAGE_16G))
		offset = PTRS_PER_PUD;
...
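A minimal, self-contained sketch of the validation pattern the two hunks above add. It uses stand-in values for H_PAGE_THP_HUGE and _PAGE_DEVMAP (the real definitions live in the arch/powerpc headers) and plain functions instead of the actual hash fault handlers; it only illustrates the "return 0 so the caller retries the access" check, not kernel code.

/*
 * Sketch of the entry-type validation added by this commit.
 * Assumptions: flag values are illustrative stand-ins; the real
 * handlers operate on pmd/pte values read from the page table.
 */
#include <stdio.h>

#define H_PAGE_THP_HUGE	0x0001UL	/* stand-in: entry is a transparent huge page */
#define _PAGE_DEVMAP	0x0002UL	/* stand-in: entry maps device memory */

/* THP path: continue only if this really is a THP or devmap entry. */
static int validate_thp_entry(unsigned long old_pmd)
{
	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
		return 0;	/* wrong entry type: caller retries the access */
	return 1;		/* safe to go on and hash this entry */
}

/* Hugetlb path: bail out if the entry turned out to be THP or devmap. */
static int validate_hugetlb_entry(unsigned long old_pte)
{
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;	/* wrong entry type: caller retries the access */
	return 1;
}

int main(void)
{
	printf("thp entry ok:      %d\n", validate_thp_entry(H_PAGE_THP_HUGE));
	printf("stale thp entry:   %d\n", validate_thp_entry(0));
	printf("hugetlb entry ok:  %d\n", validate_hugetlb_entry(0));
	printf("stale hugetlb:     %d\n", validate_hugetlb_entry(_PAGE_DEVMAP));
	return 0;
}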