Commit 950fe885 authored by David Hildenbrand, committed by Andrew Morton

mm: remove __HAVE_ARCH_PTE_SWP_EXCLUSIVE

__HAVE_ARCH_PTE_SWP_EXCLUSIVE is now supported by all architectures that
support swp PTEs, so let's drop it.

Link: https://lkml.kernel.org/r/20230113171026.582290-27-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f5c3fe30
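
For reference, the per-architecture contract that every architecture now fulfills directly is the trio of swap-PTE helpers sketched below. This is a minimal sketch modeled on the simple variants visible in this diff; _PAGE_SWP_EXCLUSIVE stands for whichever software bit an architecture sets aside in its swap PTE layout, and the mkexclusive/clear_exclusive bodies are illustrative rather than copied from any one architecture:

/*
 * Sketch of the three swap-PTE helpers each architecture defines.
 * _PAGE_SWP_EXCLUSIVE is an arch-chosen software bit that is unused
 * in that architecture's swap PTE encoding.
 */
static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

Architectures with unusual PTE layouts implement the same interface against their own representation, as the hunks below show: arm tests the bit with pte_isset(), 32-bit x86 PAE reads pte.pte_low, sparc32 uses SRMMU_SWP_EXCLUSIVE.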
@@ -328,7 +328,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -132,7 +132,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -298,7 +298,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(swp) __pte((swp).val)
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
...
@@ -417,7 +417,6 @@ static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
         return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
 }
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline pte_t pte_swp_mkexclusive(pte_t pte)
 {
         return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
...
@@ -200,7 +200,6 @@ static inline pte_t pte_mkyoung(pte_t pte)
         return pte;
 }
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -397,7 +397,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
                 (((type & 0x1f) << 1) | \
                  ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -424,7 +424,6 @@ extern void paging_init (void);
 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -276,7 +276,6 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
 #define __swp_entry_to_pmd(x)   ((pmd_t) { (x).val | _PAGE_HUGE })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -275,7 +275,6 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   (__pte((x).val))
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -190,7 +190,6 @@ extern pgd_t kernel_pg_dir[128];
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -174,7 +174,6 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -412,7 +412,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val << 2 })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -528,7 +528,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 }
 #endif
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline int pte_swp_exclusive(pte_t pte)
 {
...
@@ -253,7 +253,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -408,7 +408,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -422,7 +422,6 @@ extern void paging_init (void);
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -386,7 +386,6 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -717,7 +717,6 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline pte_t pte_swp_mkexclusive(pte_t pte)
 {
         return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
...
@@ -151,7 +151,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
         return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -752,7 +752,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -812,7 +812,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -479,7 +479,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 /* In both cases, we borrow bit 6 to store the exclusive marker in swap PTEs. */
 #define _PAGE_SWP_EXCLUSIVE     _PAGE_USER
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
...
@@ -353,7 +353,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
...
@@ -989,7 +989,6 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -313,7 +313,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
         ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
...
@@ -1299,7 +1299,6 @@ static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
                 unsigned long addr, pud_t *pud)
 {
 }
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline pte_t pte_swp_mkexclusive(pte_t pte)
 {
         return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
...
@@ -360,7 +360,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
 
-#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 static inline int pte_swp_exclusive(pte_t pte)
 {
         return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
...
@@ -1064,35 +1064,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define arch_start_context_switch(prev) do {} while (0)
 #endif
 
-/*
- * When replacing an anonymous page by a real (!non) swap entry, we clear
- * PG_anon_exclusive from the page and instead remember whether the flag was
- * set in the swp pte. During fork(), we have to mark the entry as !exclusive
- * (possibly shared). On swapin, we use that information to restore
- * PG_anon_exclusive, which is very helpful in cases where we might have
- * additional (e.g., FOLL_GET) references on a page and wouldn't be able to
- * detect exclusivity.
- *
- * These functions don't apply to non-swap entries (e.g., migration, hwpoison,
- * ...).
- */
-#ifndef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
-static inline pte_t pte_swp_mkexclusive(pte_t pte)
-{
-        return pte;
-}
-
-static inline int pte_swp_exclusive(pte_t pte)
-{
-        return false;
-}
-
-static inline pte_t pte_swp_clear_exclusive(pte_t pte)
-{
-        return pte;
-}
-#endif
-
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
...
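
With the generic fallback above gone, pte_swp_exclusive() is authoritative on every architecture: a swap PTE either carries the exclusive marker or it does not, and core mm code no longer has to treat a false return as "unknown". A hypothetical caller-side sketch of the round trip (the two helper function names below are illustrative, not part of this commit; only pte_swp_mkexclusive() and pte_swp_exclusive() are real):

/*
 * Hypothetical illustration of how the swapout and swapin paths
 * cooperate through the exclusive marker.
 */
static inline pte_t remember_exclusive(pte_t swp_pte, bool anon_exclusive)
{
        /* swapout side: stash PG_anon_exclusive in the swap PTE */
        if (anon_exclusive)
                swp_pte = pte_swp_mkexclusive(swp_pte);
        return swp_pte;
}

static inline bool was_exclusive(pte_t swp_pte)
{
        /* swapin side: the marker is now reliable everywhere */
        return pte_swp_exclusive(swp_pte);
}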
@@ -810,7 +810,6 @@ static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) {
 
 static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
 {
-#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
         unsigned long max_swap_offset;
         swp_entry_t entry, entry2;
         pte_t pte;
@@ -841,7 +840,6 @@ static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
         WARN_ON(!is_swap_pte(pte));
         entry2 = pte_to_swp_entry(pte);
         WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
-#endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */
 }
 
 static void __init pte_swap_tests(struct pgtable_debug_args *args)
...
@@ -3864,10 +3864,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
          * the swap entry concurrently) for certainly exclusive pages.
          */
         if (!folio_test_ksm(folio)) {
-                /*
-                 * Note that pte_swp_exclusive() == false for architectures
-                 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
-                 */
                 exclusive = pte_swp_exclusive(vmf->orig_pte);
                 if (folio != swapcache) {
                         /*
...
@@ -1710,17 +1710,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;
                         }
-                        /*
-                         * Note: We *don't* remember if the page was mapped
-                         * exclusively in the swap pte if the architecture
-                         * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
-                         * that case, swapin code has to re-determine that
-                         * manually and might detect the page as possibly
-                         * shared, for example, if there are other references on
-                         * the page or if the page is under writeback. We made
-                         * sure that there are no GUP pins on the page that
-                         * would rely on it, so for GUP pins this is fine.
-                         */
                         if (list_empty(&mm->mmlist)) {
                                 spin_lock(&mmlist_lock);
                                 if (list_empty(&mm->mmlist))
...