Commit 40b88644 authored by Michael Ellerman, committed by Andrew Morton

mm: remove arch_unmap()

Now that powerpc no longer uses arch_unmap() to handle VDSO unmapping,
there are no meaningful implementations left.  Drop support for it entirely,
and update comments which refer to it.

Link: https://lkml.kernel.org/r/20240812082605.743814-3-mpe@ellerman.id.au
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Jeff Xu <jeffxu@google.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Pedro Falcato <pedro.falcato@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5463bafa
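
For context, arch_unmap() was a per-architecture hook called from the generic unmap path, and asm-generic/mm_hooks.h supplied an empty stub for architectures with nothing to do, as the hunks below show. The following stand-alone C program is only a sketch of that no-op-hook pattern, not kernel code; demo_mm and demo_munmap are invented names for illustration.

/*
 * Minimal userspace sketch of the "generic no-op hook" pattern that
 * asm-generic/mm_hooks.h used for arch_unmap(): the core code calls
 * the hook unconditionally, and an empty inline makes it free for
 * architectures that don't need it.
 */
#include <stdio.h>

struct demo_mm {
	const char *name;
};

/* Stand-in for the empty arch_unmap() stub this commit removes. */
static inline void arch_unmap(struct demo_mm *mm,
			      unsigned long start, unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;	/* nothing to do: the no-op hook compiles away */
}

/* Stand-in for the generic munmap path that used to call the hook. */
static int demo_munmap(struct demo_mm *mm, unsigned long start,
		       unsigned long end)
{
	arch_unmap(mm, start, end);	/* the call site being deleted */
	printf("%s: unmapped [%#lx, %#lx)\n", mm->name, start, end);
	return 0;
}

int main(void)
{
	struct demo_mm mm = { .name = "demo" };

	return demo_munmap(&mm, 0x1000, 0x2000);
}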
@@ -260,11 +260,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 
 extern void arch_exit_mmap(struct mm_struct *mm);
 
-static inline void arch_unmap(struct mm_struct *mm,
-			      unsigned long start, unsigned long end)
-{
-}
-
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 			       bool execute, bool foreign);
@@ -232,11 +232,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
 }
 #endif
 
-static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
-			      unsigned long end)
-{
-}
-
 /*
  * We only want to enforce protection keys on the current process
  * because we effectively have no access to PKRU for other
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
  */
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }
 
-static inline void arch_unmap(struct mm_struct *mm,
-			      unsigned long start, unsigned long end)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
@@ -1743,14 +1743,12 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	/*
-	 * Check if memory is sealed before arch_unmap.
-	 * Prevent unmapping a sealed VMA.
+	 * Check if memory is sealed, prevent unmapping a sealed VMA.
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
 	if (unlikely(!can_modify_mm(mm, start, end)))
 		return -EPERM;
 
-	arch_unmap(mm, start, end);
 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
 }
@@ -841,7 +841,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
  *
  * This function takes a @mas that is either pointing to the previous VMA or set
  * to MA_START and sets it up to remove the mapping(s). The @len will be
- * aligned and any arch_unmap work will be preformed.
+ * aligned.
  *
  * Return: 0 on success and drops the lock if so directed, error and leaves the
  * lock held otherwise.
@@ -861,16 +861,12 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		return -EINVAL;
 
 	/*
-	 * Check if memory is sealed before arch_unmap.
-	 * Prevent unmapping a sealed VMA.
+	 * Check if memory is sealed, prevent unmapping a sealed VMA.
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
 	if (unlikely(!can_modify_mm(mm, start, end)))
 		return -EPERM;
 
-	/* arch_unmap() might do unmaps itself. */
-	arch_unmap(mm, start, end);
-
 	/* Find the first overlapping VMA */
 	vma = vma_find(vmi, end);
 	if (!vma) {
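
The hunk above leaves do_vmi_munmap() checking can_modify_mm() and then moving directly to vma_find(), with no arch callout in between. The stand-alone C sketch below mirrors only that ordering; the types and helpers (struct range, can_modify_range, demo_munmap_range) are invented for illustration and are not kernel APIs.

/*
 * Simplified sketch of the ordering that remains after this commit:
 * reject the request if the range is sealed, then go straight to
 * removing the mappings, with no arch hook in between.
 */
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct range {
	unsigned long start, end;
	bool sealed;
};

/* Analogue of the can_modify_mm() check: refuse a sealed range. */
static bool can_modify_range(const struct range *r)
{
	return !r->sealed;
}

static int demo_munmap_range(const struct range *r)
{
	if (!can_modify_range(r))
		return -EPERM;	/* sealed: fail before any unmap work */

	/* no arch_unmap() call here any more: go straight to removal */
	printf("removing [%#lx, %#lx)\n", r->start, r->end);
	return 0;
}

int main(void)
{
	struct range ok = { .start = 0x1000, .end = 0x2000, .sealed = false };
	struct range sealed = { .start = 0x3000, .end = 0x4000, .sealed = true };

	printf("ok: %d, sealed: %d\n", demo_munmap_range(&ok),
	       demo_munmap_range(&sealed));
	return 0;
}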