Commit da7ad366 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/book3s: Update pmd_present to look at _PAGE_PRESENT bit

With this patch we use 0x8000000000000000UL (_PAGE_PRESENT) to indicate a valid
pgd/pud/pmd entry. We also switch the p**_present() to look at this bit.

With pmd_present, we have a special case. We need to make sure we consider a
pmd marked invalid during THP split as present. Right now we clear the
_PAGE_PRESENT bit during a pmdp_invalidate. In order to consider this special
case we add a new pte bit _PAGE_INVALID (mapped to _RPAGE_SW0). This bit is
only used with _PAGE_PRESENT cleared. Hence we are not really losing a pte bit
for this special case. pmd_present is also updated to look at _PAGE_INVALID.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8139046a
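To make the scheme above concrete, here is a minimal user-space sketch (not kernel code) of how the two bits are meant to interact. It assumes _PAGE_PRESENT is the top bit (0x8000000000000000UL, as stated above) and that _PAGE_INVALID, mapped to _RPAGE_SW0, is bit 61 (0x2000000000000000UL, an assumption); all names prefixed sk_ are made up for illustration.

/*
 * Minimal user-space sketch of the present/invalid scheme described
 * above; names and the _RPAGE_SW0 value are illustrative assumptions,
 * not the kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_PRESENT 0x8000000000000000UL /* hardware valid bit */
#define SK_PAGE_INVALID 0x2000000000000000UL /* software bit, assumed to mirror _RPAGE_SW0 */

typedef struct { uint64_t val; } sk_pmd_t;

/* pmd_present() analogue: a valid entry, or one temporarily
 * invalidated for a THP split, both count as present. */
static bool sk_pmd_present(sk_pmd_t pmd)
{
	return pmd.val & (SK_PAGE_PRESENT | SK_PAGE_INVALID);
}

/* pmdp_invalidate() analogue: clear the hardware valid bit but leave
 * a software marker so the entry is still seen as present. */
static void sk_pmdp_invalidate(sk_pmd_t *pmdp)
{
	pmdp->val = (pmdp->val & ~SK_PAGE_PRESENT) | SK_PAGE_INVALID;
}

int main(void)
{
	sk_pmd_t pmd = { .val = SK_PAGE_PRESENT | 0x1000 };

	printf("before invalidate: present=%d\n", sk_pmd_present(pmd));
	sk_pmdp_invalidate(&pmd);
	printf("during split:      present=%d\n", sk_pmd_present(pmd));
	return 0;
}

Both prints report the entry as present: the pmd stays visible to generic MM code across the invalidate, which is exactly the THP-split window this patch has to preserve.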
@@ -18,6 +18,11 @@
 #include <asm/book3s/64/hash-4k.h>
 #endif
 
+/* Bits to set in a PMD/PUD/PGD entry valid bit */
+#define HASH_PMD_VAL_BITS	(0x8000000000000000UL)
+#define HASH_PUD_VAL_BITS	(0x8000000000000000UL)
+#define HASH_PGD_VAL_BITS	(0x8000000000000000UL)
+
 /*
  * Size of EA range mapped by our pagetables.
  */
@@ -875,8 +875,16 @@ static inline int pmd_none(pmd_t pmd)
 
 static inline int pmd_present(pmd_t pmd)
 {
+	/*
+	 * A pmd is considered present if _PAGE_PRESENT is set.
+	 * We also need to treat a pmd that is marked invalid
+	 * during a THP split as present. Hence we look for
+	 * _PAGE_INVALID if we find _PAGE_PRESENT cleared.
+	 */
+	if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
+		return true;
 
-	return !pmd_none(pmd);
+	return false;
 }
 
 static inline int pmd_bad(pmd_t pmd)
@@ -903,7 +911,7 @@ static inline int pud_none(pud_t pud)
 
 static inline int pud_present(pud_t pud)
 {
-	return !pud_none(pud);
+	return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
 }
 
 extern struct page *pud_page(pud_t pud);
@@ -950,7 +958,7 @@ static inline int pgd_none(pgd_t pgd)
 
 static inline int pgd_present(pgd_t pgd)
 {
-	return !pgd_none(pgd);
+	return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
 }
 
 static inline pte_t pgd_pte(pgd_t pgd)
@@ -1001,9 +1001,9 @@ void __init hash__early_init_mmu(void)
 	 * 4k use hugepd format, so for hash set then to
 	 * zero
 	 */
-	__pmd_val_bits = 0;
-	__pud_val_bits = 0;
-	__pgd_val_bits = 0;
+	__pmd_val_bits = HASH_PMD_VAL_BITS;
+	__pud_val_bits = HASH_PUD_VAL_BITS;
+	__pgd_val_bits = HASH_PGD_VAL_BITS;
 
 	__kernel_virt_start = H_KERN_VIRT_START;
 	__kernel_virt_size = H_KERN_VIRT_SIZE;
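For context: the HASH_*_VAL_BITS values feed the runtime __pmd_val_bits/__pud_val_bits/__pgd_val_bits variables, which the populate helpers OR into upper-level entries, so the new _PAGE_PRESENT-based checks can work on hash as well as radix. Below is a hedged, user-space sketch of that idea only; the helper names are made up, and the real populate code lives in the book3s64 headers.

#include <stdint.h>
#include <stdio.h>

/* Runtime-selected valid bits, set once at MMU init; the value used in
 * main() mirrors HASH_PUD_VAL_BITS and is an assumption for this sketch. */
static uint64_t sk_pud_val_bits;

typedef struct { uint64_t val; } sk_pud_t;

/* pud_populate() analogue: store the next-level table pointer with the
 * platform's valid bits ORed in. */
static sk_pud_t sk_pud_populate(uint64_t pmd_table)
{
	return (sk_pud_t){ .val = pmd_table | sk_pud_val_bits };
}

/* pud_present() analogue from the hunk above: test only the valid bit. */
static int sk_pud_present(sk_pud_t pud)
{
	return !!(pud.val & 0x8000000000000000UL);
}

int main(void)
{
	sk_pud_val_bits = 0x8000000000000000UL;	/* as in hash__early_init_mmu() */

	sk_pud_t pud = sk_pud_populate(0x1f4000);
	printf("pud_present = %d\n", sk_pud_present(pud));
	return 0;
}

Before this patch the valid bits were zero on hash, so pud_present()/pgd_present() could not have tested _PAGE_PRESENT; that is why the defines in the first hunk and this init change go together.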
@@ -69,7 +69,11 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
+	/*
+	 * Make sure hardware valid bit is not set. We don't do
+	 * tlb flush for this update.
+	 */
+	WARN_ON(pte_val(pmd_pte(*pmdp)) & _PAGE_PRESENT);
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
 	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
 #endif
@@ -106,7 +110,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 {
 	unsigned long old_pmd;
 
-	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
@@ -188,11 +188,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte)
 {
 	/*
-	 * When handling numa faults, we already have the pte marked
-	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
-	 * Hence we can use set_pte_at for them.
+	 * Make sure hardware valid bit is not set. We don't do
+	 * tlb flush for this update.
 	 */
-	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));
+	VM_WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
 
 	/* Add the pte bit when trying to set a pte */
 	pte = __pte(pte_val(pte) | _PAGE_PTE);
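The two WARN_ON changes above enforce the same rule: set_pte_at() and set_pmd_at() only build a new translation and do no TLB flush, so the entry being overwritten must not be valid in hardware. A small sketch of the tightened check, again with assumed bit values (_PAGE_PTE taken as 0x4000000000000000UL) and illustrative sk_ names:

#include <assert.h>
#include <stdint.h>

#define SK_PAGE_PRESENT 0x8000000000000000UL	/* hardware valid bit */
#define SK_PAGE_PTE	0x4000000000000000UL	/* marks a leaf pte   */

typedef struct { uint64_t val; } sk_pte_t;

/* set_pte_at() analogue: refuse to overwrite a hardware-valid entry,
 * since no TLB flush happens here, then set the pte marker bit. */
static void sk_set_pte_at(sk_pte_t *ptep, sk_pte_t pte)
{
	assert(!(ptep->val & SK_PAGE_PRESENT));
	ptep->val = pte.val | SK_PAGE_PTE;
}

int main(void)
{
	sk_pte_t slot = { .val = 0 };	/* empty slot: check passes */
	sk_pte_t new  = { .val = SK_PAGE_PRESENT | 0x1000 };

	sk_set_pte_at(&slot, new);
	return 0;
}

An entry carrying only software bits no longer trips the warning, while anything still holding the hardware valid bit does, which is a stricter and simpler condition than the old present-and-not-protnone test.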