Commit 4a366f51 authored by Heiko Carstens, committed by Vasily Gorbik

s390/mm,pgtable: don't use pte_val()/pXd_val() as lvalue

Convert pgtable code so pte_val()/pXd_val() aren't used as lvalue
anymore. This allows a later step to convert pte_val()/pXd_val() to
functions, which in turn makes it impossible to use these macros to
modify page table entries like they have been used before.

Therefore a construct like this:

        pte_val(*pte) = __pa(addr) | prot;

which would directly write into a page table, isn't possible anymore
with the last step of this series.
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent b8e3b379
...@@ -834,15 +834,13 @@ static inline int pte_soft_dirty(pte_t pte) ...@@ -834,15 +834,13 @@ static inline int pte_soft_dirty(pte_t pte)
static inline pte_t pte_mksoft_dirty(pte_t pte) static inline pte_t pte_mksoft_dirty(pte_t pte)
{ {
pte_val(pte) |= _PAGE_SOFT_DIRTY; return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
return pte;
} }
#define pte_swp_mksoft_dirty pte_mksoft_dirty #define pte_swp_mksoft_dirty pte_mksoft_dirty
static inline pte_t pte_clear_soft_dirty(pte_t pte) static inline pte_t pte_clear_soft_dirty(pte_t pte)
{ {
pte_val(pte) &= ~_PAGE_SOFT_DIRTY; return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
return pte;
} }
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
...@@ -853,14 +851,12 @@ static inline int pmd_soft_dirty(pmd_t pmd) ...@@ -853,14 +851,12 @@ static inline int pmd_soft_dirty(pmd_t pmd)
static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{ {
pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
return pmd;
} }
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{ {
pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
return pmd;
} }
/* /*
...@@ -970,79 +966,74 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt ...@@ -970,79 +966,74 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
*/ */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ {
pte_val(pte) &= _PAGE_CHG_MASK; pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
pte_val(pte) |= pgprot_val(newprot); pte = set_pte_bit(pte, newprot);
/* /*
* newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
* has the invalid bit set, clear it again for readable, young pages * has the invalid bit set, clear it again for readable, young pages
*/ */
if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
pte_val(pte) &= ~_PAGE_INVALID; pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
/* /*
* newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
* protection bit set, clear it again for writable, dirty pages * protection bit set, clear it again for writable, dirty pages
*/ */
if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
pte_val(pte) &= ~_PAGE_PROTECT; pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte; return pte;
} }
static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_wrprotect(pte_t pte)
{ {
pte_val(pte) &= ~_PAGE_WRITE; pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
pte_val(pte) |= _PAGE_PROTECT; return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte;
} }
static inline pte_t pte_mkwrite(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte)
{ {
pte_val(pte) |= _PAGE_WRITE; pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
if (pte_val(pte) & _PAGE_DIRTY) if (pte_val(pte) & _PAGE_DIRTY)
pte_val(pte) &= ~_PAGE_PROTECT; pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte; return pte;
} }
static inline pte_t pte_mkclean(pte_t pte) static inline pte_t pte_mkclean(pte_t pte)
{ {
pte_val(pte) &= ~_PAGE_DIRTY; pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
pte_val(pte) |= _PAGE_PROTECT; return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte;
} }
static inline pte_t pte_mkdirty(pte_t pte) static inline pte_t pte_mkdirty(pte_t pte)
{ {
pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
if (pte_val(pte) & _PAGE_WRITE) if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) &= ~_PAGE_PROTECT; pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte; return pte;
} }
static inline pte_t pte_mkold(pte_t pte) static inline pte_t pte_mkold(pte_t pte)
{ {
pte_val(pte) &= ~_PAGE_YOUNG; pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
pte_val(pte) |= _PAGE_INVALID; return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
return pte;
} }
static inline pte_t pte_mkyoung(pte_t pte) static inline pte_t pte_mkyoung(pte_t pte)
{ {
pte_val(pte) |= _PAGE_YOUNG; pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
if (pte_val(pte) & _PAGE_READ) if (pte_val(pte) & _PAGE_READ)
pte_val(pte) &= ~_PAGE_INVALID; pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
return pte; return pte;
} }
static inline pte_t pte_mkspecial(pte_t pte) static inline pte_t pte_mkspecial(pte_t pte)
{ {
pte_val(pte) |= _PAGE_SPECIAL; return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
return pte;
} }
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte) static inline pte_t pte_mkhuge(pte_t pte)
{ {
pte_val(pte) |= _PAGE_LARGE; return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
return pte;
} }
#endif #endif
...@@ -1253,7 +1244,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, ...@@ -1253,7 +1244,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry) pte_t *ptep, pte_t entry)
{ {
if (pte_present(entry)) if (pte_present(entry))
pte_val(entry) &= ~_PAGE_UNUSED; entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
if (mm_has_pgste(mm)) if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry); ptep_set_pte_at(mm, addr, ptep, entry);
else else
...@@ -1268,9 +1259,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) ...@@ -1268,9 +1259,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{ {
pte_t __pte; pte_t __pte;
pte_val(__pte) = physpage | pgprot_val(pgprot); __pte = __pte(physpage | pgprot_val(pgprot));
if (!MACHINE_HAS_NX) if (!MACHINE_HAS_NX)
pte_val(__pte) &= ~_PAGE_NOEXEC; __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
return pte_mkyoung(__pte); return pte_mkyoung(__pte);
} }
...@@ -1410,61 +1401,57 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end) ...@@ -1410,61 +1401,57 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
static inline pmd_t pmd_wrprotect(pmd_t pmd) static inline pmd_t pmd_wrprotect(pmd_t pmd)
{ {
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd;
} }
static inline pmd_t pmd_mkwrite(pmd_t pmd) static inline pmd_t pmd_mkwrite(pmd_t pmd)
{ {
pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd; return pmd;
} }
static inline pmd_t pmd_mkclean(pmd_t pmd) static inline pmd_t pmd_mkclean(pmd_t pmd)
{ {
pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd;
} }
static inline pmd_t pmd_mkdirty(pmd_t pmd) static inline pmd_t pmd_mkdirty(pmd_t pmd)
{ {
pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd; return pmd;
} }
static inline pud_t pud_wrprotect(pud_t pud) static inline pud_t pud_wrprotect(pud_t pud)
{ {
pud_val(pud) &= ~_REGION3_ENTRY_WRITE; pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
pud_val(pud) |= _REGION_ENTRY_PROTECT; return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud;
} }
static inline pud_t pud_mkwrite(pud_t pud) static inline pud_t pud_mkwrite(pud_t pud)
{ {
pud_val(pud) |= _REGION3_ENTRY_WRITE; pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
if (pud_val(pud) & _REGION3_ENTRY_DIRTY) if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
pud_val(pud) &= ~_REGION_ENTRY_PROTECT; pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud; return pud;
} }
static inline pud_t pud_mkclean(pud_t pud) static inline pud_t pud_mkclean(pud_t pud)
{ {
pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
pud_val(pud) |= _REGION_ENTRY_PROTECT; return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud;
} }
static inline pud_t pud_mkdirty(pud_t pud) static inline pud_t pud_mkdirty(pud_t pud)
{ {
pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY; pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
if (pud_val(pud) & _REGION3_ENTRY_WRITE) if (pud_val(pud) & _REGION3_ENTRY_WRITE)
pud_val(pud) &= ~_REGION_ENTRY_PROTECT; pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud; return pud;
} }
...@@ -1488,37 +1475,39 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) ...@@ -1488,37 +1475,39 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
static inline pmd_t pmd_mkyoung(pmd_t pmd) static inline pmd_t pmd_mkyoung(pmd_t pmd)
{ {
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
return pmd; return pmd;
} }
static inline pmd_t pmd_mkold(pmd_t pmd) static inline pmd_t pmd_mkold(pmd_t pmd)
{ {
pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
return pmd;
} }
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{ {
pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | unsigned long mask;
_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
pmd_val(pmd) |= massage_pgprot_pmd(newprot); mask |= _SEGMENT_ENTRY_DIRTY;
mask |= _SEGMENT_ENTRY_YOUNG;
mask |= _SEGMENT_ENTRY_LARGE;
mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
pmd = __pmd(pmd_val(pmd) & mask);
pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
return pmd; return pmd;
} }
static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{ {
pmd_t __pmd; return __pmd(physpage + massage_pgprot_pmd(pgprot));
pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
return __pmd;
} }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
...@@ -1640,16 +1629,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, ...@@ -1640,16 +1629,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry) pmd_t *pmdp, pmd_t entry)
{ {
if (!MACHINE_HAS_NX) if (!MACHINE_HAS_NX)
pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC; entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
set_pmd(pmdp, entry); set_pmd(pmdp, entry);
} }
static inline pmd_t pmd_mkhuge(pmd_t pmd) static inline pmd_t pmd_mkhuge(pmd_t pmd)
{ {
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd;
} }
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
...@@ -1745,12 +1733,12 @@ static inline int has_transparent_hugepage(void) ...@@ -1745,12 +1733,12 @@ static inline int has_transparent_hugepage(void)
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ {
pte_t pte; unsigned long pteval;
pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; pteval = _PAGE_INVALID | _PAGE_PROTECT;
pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
return pte; return __pte(pteval);
} }
static inline unsigned long __swp_type(swp_entry_t entry) static inline unsigned long __swp_type(swp_entry_t entry)
......
...@@ -224,8 +224,8 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) ...@@ -224,8 +224,8 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
* Without enhanced suppression-on-protection force * Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes. * the dirty bit on for all writable ptes.
*/ */
pte_val(entry) |= _PAGE_DIRTY; entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
pte_val(entry) &= ~_PAGE_PROTECT; entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
} }
if (!(pte_val(entry) & _PAGE_PROTECT)) if (!(pte_val(entry) & _PAGE_PROTECT))
/* This pte allows write access, set user-dirty */ /* This pte allows write access, set user-dirty */
...@@ -275,7 +275,7 @@ static inline pte_t ptep_xchg_commit(struct mm_struct *mm, ...@@ -275,7 +275,7 @@ static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
pgste = pgste_update_all(old, pgste, mm); pgste = pgste_update_all(old, pgste, mm);
if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
_PGSTE_GPS_USAGE_UNUSED) _PGSTE_GPS_USAGE_UNUSED)
pte_val(old) |= _PAGE_UNUSED; old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
} }
pgste = pgste_set_pte(ptep, pgste, new); pgste = pgste_set_pte(ptep, pgste, new);
pgste_set_unlock(ptep, pgste); pgste_set_unlock(ptep, pgste);
...@@ -345,7 +345,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, ...@@ -345,7 +345,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
if (!MACHINE_HAS_NX) if (!MACHINE_HAS_NX)
pte_val(pte) &= ~_PAGE_NOEXEC; pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep); pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm); pgste_set_key(ptep, pgste, pte, mm);
...@@ -646,12 +646,12 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr, ...@@ -646,12 +646,12 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
if (prot == PROT_NONE && !pte_i) { if (prot == PROT_NONE && !pte_i) {
ptep_flush_direct(mm, addr, ptep, nodat); ptep_flush_direct(mm, addr, ptep, nodat);
pgste = pgste_update_all(entry, pgste, mm); pgste = pgste_update_all(entry, pgste, mm);
pte_val(entry) |= _PAGE_INVALID; entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
} }
if (prot == PROT_READ && !pte_p) { if (prot == PROT_READ && !pte_p) {
ptep_flush_direct(mm, addr, ptep, nodat); ptep_flush_direct(mm, addr, ptep, nodat);
pte_val(entry) &= ~_PAGE_INVALID; entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
pte_val(entry) |= _PAGE_PROTECT; entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
} }
pgste_val(pgste) |= bit; pgste_val(pgste) |= bit;
pgste = pgste_set_pte(ptep, pgste, entry); pgste = pgste_set_pte(ptep, pgste, entry);
...@@ -675,8 +675,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, ...@@ -675,8 +675,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
!(pte_val(pte) & _PAGE_PROTECT))) { !(pte_val(pte) & _PAGE_PROTECT))) {
pgste_val(spgste) |= PGSTE_VSIE_BIT; pgste_val(spgste) |= PGSTE_VSIE_BIT;
tpgste = pgste_get_lock(tptep); tpgste = pgste_get_lock(tptep);
pte_val(tpte) = (pte_val(spte) & PAGE_MASK) | tpte = __pte((pte_val(spte) & PAGE_MASK) |
(pte_val(pte) & _PAGE_PROTECT); (pte_val(pte) & _PAGE_PROTECT));
/* don't touch the storage key - it belongs to parent pgste */ /* don't touch the storage key - it belongs to parent pgste */
tpgste = pgste_set_pte(tptep, tpgste, tpte); tpgste = pgste_set_pte(tptep, tpgste, tpte);
pgste_set_unlock(tptep, tpgste); pgste_set_unlock(tptep, tpgste);
...@@ -773,9 +773,9 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr, ...@@ -773,9 +773,9 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT); nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
ptep_ipte_global(mm, addr, ptep, nodat); ptep_ipte_global(mm, addr, ptep, nodat);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT; pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
else else
pte_val(pte) |= _PAGE_INVALID; pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
set_pte(ptep, pte); set_pte(ptep, pte);
} }
pgste_set_unlock(ptep, pgste); pgste_set_unlock(ptep, pgste);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment