Commit 41e0d491 authored by David Hildenbrand's avatar David Hildenbrand Committed by Andrew Morton

csky/mm: support __HAVE_ARCH_PTE_SWP_EXCLUSIVE

Let's support __HAVE_ARCH_PTE_SWP_EXCLUSIVE by stealing one bit from the
offset.  This reduces the maximum swap space per file to 16 GiB (was 32
GiB).

We might actually be able to reuse one of the other software bits
(_PAGE_READ / _PAGE_WRITE) instead, because we only have to keep
pte_present(), pte_none() and HW happy.  For now, let's keep it simple
because there might be something non-obvious.

Link: https://lkml.kernel.org/r/20230113171026.582290-6-david@redhat.com
Signed-off-by: default avatarDavid Hildenbrand <david@redhat.com>
Cc: Guo Ren <guoren@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 20aae9ef
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
#define _PAGE_ACCESSED (1<<3) #define _PAGE_ACCESSED (1<<3)
#define _PAGE_MODIFIED (1<<4) #define _PAGE_MODIFIED (1<<4)
/* We borrow bit 9 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (1<<9)
/* implemented in hardware */ /* implemented in hardware */
#define _PAGE_GLOBAL (1<<6) #define _PAGE_GLOBAL (1<<6)
#define _PAGE_VALID (1<<7) #define _PAGE_VALID (1<<7)
...@@ -26,7 +29,8 @@ ...@@ -26,7 +29,8 @@
#define _PAGE_PROT_NONE _PAGE_READ #define _PAGE_PROT_NONE _PAGE_READ
/* /*
* Encode and decode a swap entry * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
* *
* Format of swap PTE: * Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero) * bit 0: _PAGE_PRESENT (zero)
...@@ -35,15 +39,16 @@ ...@@ -35,15 +39,16 @@
* bit 6: _PAGE_GLOBAL (zero) * bit 6: _PAGE_GLOBAL (zero)
* bit 7: _PAGE_VALID (zero) * bit 7: _PAGE_VALID (zero)
* bit 8: swap type[4] * bit 8: swap type[4]
* bit 9 - 31: swap offset * bit 9: exclusive marker
* bit 10 - 31: swap offset
*/ */
#define __swp_type(x) ((((x).val >> 2) & 0xf) | \ #define __swp_type(x) ((((x).val >> 2) & 0xf) | \
(((x).val >> 4) & 0x10)) (((x).val >> 4) & 0x10))
#define __swp_offset(x) ((x).val >> 9) #define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) ((swp_entry_t) { \ #define __swp_entry(type, offset) ((swp_entry_t) { \
((type & 0xf) << 2) | \ ((type & 0xf) << 2) | \
((type & 0x10) << 4) | \ ((type & 0x10) << 4) | \
((offset) << 9)}) ((offset) << 10)})
#define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA
......
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
#define _PAGE_PRESENT (1<<10) #define _PAGE_PRESENT (1<<10)
#define _PAGE_MODIFIED (1<<11) #define _PAGE_MODIFIED (1<<11)
/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (1<<7)
/* implemented in hardware */ /* implemented in hardware */
#define _PAGE_GLOBAL (1<<0) #define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1) #define _PAGE_VALID (1<<1)
...@@ -26,23 +29,25 @@ ...@@ -26,23 +29,25 @@
#define _PAGE_PROT_NONE _PAGE_WRITE #define _PAGE_PROT_NONE _PAGE_WRITE
/* /*
* Encode and decode a swap entry * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
* *
* Format of swap PTE: * Format of swap PTE:
* bit 0: _PAGE_GLOBAL (zero) * bit 0: _PAGE_GLOBAL (zero)
* bit 1: _PAGE_VALID (zero) * bit 1: _PAGE_VALID (zero)
* bit 2 - 6: swap type * bit 2 - 6: swap type
* bit 7 - 8: swap offset[0 - 1] * bit 7: exclusive marker
* bit 8: swap offset[0]
* bit 9: _PAGE_WRITE (zero) * bit 9: _PAGE_WRITE (zero)
* bit 10: _PAGE_PRESENT (zero) * bit 10: _PAGE_PRESENT (zero)
* bit 11 - 31: swap offset[2 - 22] * bit 11 - 31: swap offset[1 - 21]
*/ */
#define __swp_type(x) (((x).val >> 2) & 0x1f) #define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((((x).val >> 7) & 0x3) | \ #define __swp_offset(x) ((((x).val >> 8) & 0x1) | \
(((x).val >> 9) & 0x7ffffc)) (((x).val >> 10) & 0x3ffffe))
#define __swp_entry(type, offset) ((swp_entry_t) { \ #define __swp_entry(type, offset) ((swp_entry_t) { \
((type & 0x1f) << 2) | \ ((type & 0x1f) << 2) | \
((offset & 0x3) << 7) | \ ((offset & 0x1) << 8) | \
((offset & 0x7ffffc) << 9)}) ((offset & 0x3ffffe) << 10)})
#endif /* __ASM_CSKY_PGTABLE_BITS_H */ #endif /* __ASM_CSKY_PGTABLE_BITS_H */
...@@ -200,6 +200,24 @@ static inline pte_t pte_mkyoung(pte_t pte) ...@@ -200,6 +200,24 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte; return pte;
} }
/* Advertise that this arch encodes the swap-exclusive marker in swap PTEs. */
#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
/*
 * Test whether a swap PTE carries the exclusive marker
 * (_PAGE_SWP_EXCLUSIVE, a software bit borrowed from the swap offset).
 * Only meaningful for swap PTEs, i.e. !pte_none() && !pte_present().
 * Returns the raw masked bit (nonzero if set), not a normalized 0/1.
 */
static inline int pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
/*
 * Set the exclusive marker in a swap PTE and return the updated PTE.
 * Caller must only apply this to swap PTEs (never to present PTEs, where
 * the borrowed bit has a different meaning).
 */
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
return pte;
}
/*
 * Clear the exclusive marker from a swap PTE and return the updated PTE.
 * Counterpart to pte_swp_mkexclusive(); swap-PTE-only, as above.
 */
static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
return pte;
}
#define __HAVE_PHYS_MEM_ACCESS_PROT #define __HAVE_PHYS_MEM_ACCESS_PROT
struct file; struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment