Commit 99c29133 authored by Gerald Schaefer, committed by Andrew Morton

mm: add PTE pointer parameter to flush_tlb_fix_spurious_fault()

s390 can do more fine-grained handling of spurious TLB protection faults,
when there also is the PTE pointer available.

Therefore, pass on the PTE pointer to flush_tlb_fix_spurious_fault() as an
additional parameter.

This will add no functional change to other architectures, but those with
private flush_tlb_fix_spurious_fault() implementations need to be made
aware of the new parameter.

Link: https://lkml.kernel.org/r/20230306161548.661740-1-gerald.schaefer@linux.ibm.com
Signed-off-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>	[arm64]
Acked-by: Michael Ellerman <mpe@ellerman.id.au>		[powerpc]
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e1807d5d
...@@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void) ...@@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
* fault on one CPU which has been handled concurrently by another CPU * fault on one CPU which has been handled concurrently by another CPU
* does not need to perform additional invalidation. * does not need to perform additional invalidation.
*/ */
/*
 * No-op on this architecture: a spurious fault already handled by another
 * CPU needs no additional TLB invalidation here (see the comment above).
 * The diff view had fused the pre-commit two-argument and post-commit
 * three-argument forms onto one line; this is the post-commit form,
 * which takes the (unused) PTE pointer as a third parameter.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
/* /*
* ZERO_PAGE is a global shared page that is always zero: used * ZERO_PAGE is a global shared page that is always zero: used
......
...@@ -469,7 +469,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) ...@@ -469,7 +469,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
} }
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
unsigned long address) unsigned long address,
pte_t *ptep)
{ {
} }
......
...@@ -121,7 +121,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, ...@@ -121,7 +121,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
unsigned long address) unsigned long address,
pte_t *ptep)
{ {
/* /*
* Book3S 64 does not require spurious fault flushes because the PTE * Book3S 64 does not require spurious fault flushes because the PTE
......
...@@ -1239,7 +1239,8 @@ static inline int pte_allow_rdp(pte_t old, pte_t new) ...@@ -1239,7 +1239,8 @@ static inline int pte_allow_rdp(pte_t old, pte_t new)
} }
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
unsigned long address) unsigned long address,
pte_t *ptep)
{ {
/* /*
* RDP might not have propagated the PTE protection reset to all CPUs, * RDP might not have propagated the PTE protection reset to all CPUs,
...@@ -1247,11 +1248,12 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, ...@@ -1247,11 +1248,12 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
* NOTE: This will also be called when a racing pagetable update on * NOTE: This will also be called when a racing pagetable update on
* another thread already installed the correct PTE. Both cases cannot * another thread already installed the correct PTE. Both cases cannot
* really be distinguished. * really be distinguished.
* Therefore, only do the local TLB flush when RDP can be used, to avoid * Therefore, only do the local TLB flush when RDP can be used, and the
* unnecessary overhead. * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
* A local RDP can be used to do the flush.
*/ */
if (MACHINE_HAS_RDP) if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
asm volatile("ptlb" : : : "memory"); __ptep_rdp(address, ptep, 0, 0, 1);
} }
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
......
...@@ -1097,7 +1097,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, ...@@ -1097,7 +1097,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
} }
/*
 * x86: no-op — spurious faults are resolved without a TLB flush.
 * Post-commit three-argument form (the diff view had fused the old
 * two-argument and new three-argument defines onto one line); the new
 * ptep argument is accepted and ignored.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
......
...@@ -817,7 +817,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) ...@@ -817,7 +817,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
#endif #endif
/*
 * Generic fallback (include/linux/pgtable.h): architectures that do not
 * provide their own flush_tlb_fix_spurious_fault() get a full per-page
 * flush. Post-commit form — the ptep parameter is accepted for interface
 * uniformity but unused by the generic implementation.
 */
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif
/* /*
......
...@@ -4944,7 +4944,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) ...@@ -4944,7 +4944,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
* with threads. * with threads.
*/ */
if (vmf->flags & FAULT_FLAG_WRITE) if (vmf->flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
vmf->pte);
} }
unlock: unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl); pte_unmap_unlock(vmf->pte, vmf->ptl);
......
...@@ -69,7 +69,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -69,7 +69,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(*ptep, entry); int changed = !pte_same(*ptep, entry);
if (changed) { if (changed) {
set_pte_at(vma->vm_mm, address, ptep, entry); set_pte_at(vma->vm_mm, address, ptep, entry);
flush_tlb_fix_spurious_fault(vma, address); flush_tlb_fix_spurious_fault(vma, address, ptep);
} }
return changed; return changed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment