Commit a16382ce authored by Paul Mundt

sh64: _PAGE_SPECIAL support.

Now that sh64 has grown extended page flag support we finally have a free
bit for _PAGE_SPECIAL. Wire it up.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 48ccb2ce
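
The pattern being wired up below: the architecture dedicates one software PTE bit to "special" mappings, exposes it through pte_special()/pte_mkspecial(), and defines __HAVE_ARCH_PTE_SPECIAL so the generic mm layer will use those accessors. A minimal, compile-alone sketch of that accessor pattern, assuming placeholder types and bit positions rather than the real sh64 _PAGE_EXT() encoding:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;                      /* stand-in, not the kernel type */

#define PAGE_SPECIAL_BIT ((pte_t)1 << 33)    /* assumed extended-flag slot */

/* Model of the accessors the diff implements for sh64. */
static pte_t pte_mkspecial(pte_t pte) { return pte | PAGE_SPECIAL_BIT; }
static int   pte_special(pte_t pte)   { return (pte & PAGE_SPECIAL_BIT) != 0; }

int main(void)
{
	pte_t pte = 0x1000;              /* pretend PFN/protection bits */

	pte = pte_mkspecial(pte);        /* what a PFN-insert path would do */
	printf("special? %d\n", pte_special(pte));
	return 0;
}

The real accessors in the hunks below go through pte_val()/__pte() and set_pte() rather than raw integers; the sketch only models the bit arithmetic.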
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -169,6 +169,8 @@ extern void page_table_range_init(unsigned long start, unsigned long end,
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
+#define __HAVE_ARCH_PTE_SPECIAL
+
 #include <asm-generic/pgtable.h>
 
 #endif /* __ASM_SH_PGTABLE_H */
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -378,8 +378,6 @@ PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
 PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
 PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
 
-#define __HAVE_ARCH_PTE_SPECIAL
-
 /*
  * Macro and implementation to make a page protection as uncachable.
  */
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -132,6 +132,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
  * anything above the PPN field.
  */
 #define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
+#define _PAGE_SPECIAL	_PAGE_EXT(0x002)
 
 #define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
 				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
@@ -175,7 +176,8 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 /* Default flags for a User page */
 #define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)
 
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SPECIAL)
 
 /*
  * We have full permissions (Read/Write/Execute/Shared).
@@ -265,7 +267,7 @@ static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return 0; }
+static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
 
 static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
 static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -274,8 +276,7 @@ static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) |
 static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
 static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-
-static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry.
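
Why the bit matters on the consumer side: once __HAVE_ARCH_PTE_SPECIAL is defined, the generic mm code of this era (vm_normal_page() in mm/memory.c) can test pte_special() to recognize raw PFN mappings that have no struct page behind them, instead of relying on pfn-based fallbacks. A rough, simplified sketch of that check with stand-in types, not the actual kernel function:

#include <stdint.h>
#include <stdio.h>

struct page { uint64_t pfn; };               /* stand-in, not the kernel struct */
typedef uint64_t pte_t;

#define _PAGE_SPECIAL ((pte_t)1 << 33)       /* assumed bit, as in the sketch above */

static int pte_special(pte_t pte) { return (pte & _PAGE_SPECIAL) != 0; }

/*
 * vm_normal_page()-style decision: a special mapping has no struct page
 * to refcount or add to the rmap, so callers get NULL and skip it.
 */
static struct page *normal_page_sketch(pte_t pte, struct page *pg)
{
	if (pte_special(pte))
		return NULL;
	return pg;
}

int main(void)
{
	struct page pg = { .pfn = 0x1234 };

	printf("plain:   %p\n", (void *)normal_page_sketch(0, &pg));
	printf("special: %p\n", (void *)normal_page_sketch(_PAGE_SPECIAL, &pg));
	return 0;
}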