Commit 73f693c3 authored by Joerg Roedel, committed by Linus Torvalds

mm: remove vmalloc_sync_(un)mappings()

These functions are not needed anymore because the vmalloc and ioremap
mappings are now synchronized when they are created or torn down.

Remove all callers and function definitions.
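
A minimal sketch of the mechanism that replaces them, paraphrased rather than
quoted from this series (populate_kernel_range() is a hypothetical helper;
pgtbl_mod_mask, ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are
the hooks the diff below refers to): page-table updates under the vmalloc
range record which levels they touched, and the core syncs them eagerly at
creation time instead of relying on a later vmalloc_sync_mappings() pass.

	#ifndef ARCH_PAGE_TABLE_SYNC_MASK
	#define ARCH_PAGE_TABLE_SYNC_MASK 0	/* arches opt in with PGTBL_P?D_MODIFIED bits */
	#endif

	static int map_kernel_range_example(unsigned long start, unsigned long size)
	{
		pgtbl_mod_mask mask = 0;
		int ret;

		/* hypothetical helper: populate page tables, record touched levels */
		ret = populate_kernel_range(start, size, &mask);

		/* sync while creating the mapping, not at some later point */
		if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
			arch_sync_kernel_mappings(start, start + size);

		return ret;
	}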
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200515140023.25469-7-joro@8bytes.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 86cf69f1
@@ -214,26 +214,6 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 	}
 }
 
-static void vmalloc_sync(void)
-{
-	unsigned long address;
-
-	if (SHARED_KERNEL_PMD)
-		return;
-
-	arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
-}
-
-void vmalloc_sync_mappings(void)
-{
-	vmalloc_sync();
-}
-
-void vmalloc_sync_unmappings(void)
-{
-	vmalloc_sync();
-}
-
 /*
  * 32-bit:
  *
@@ -336,23 +316,6 @@ static void dump_pagetable(unsigned long address)
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_mappings(void)
-{
-	/*
-	 * 64-bit mappings might allocate new p4d/pud pages
-	 * that need to be propagated to all tasks' PGDs.
-	 */
-	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
-}
-
-void vmalloc_sync_unmappings(void)
-{
-	/*
-	 * Unmappings never allocate or free p4d/pud pages.
-	 * No work is required here.
-	 */
-}
-
 /*
  * 64-bit:
  *
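
On x86-64 the replacement hook plausibly reduces to the same call the deleted
vmalloc_sync_mappings() made; a sketch inferred from the removed code above,
not part of this diff:

	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
	{
		/* propagate newly allocated p4d/pud entries to all tasks' PGDs */
		sync_global_pgds(start & PGDIR_MASK, end);
	}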
@@ -167,12 +167,6 @@ int ghes_estatus_pool_init(int num_ghes)
 	if (!addr)
 		goto err_pool_alloc;
 
-	/*
-	 * New allocation must be visible in all pgd before it can be found by
-	 * an NMI allocating from the pool.
-	 */
-	vmalloc_sync_mappings();
-
 	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
 	if (rc)
 		goto err_pool_add;
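
The sync was needed here because the estatus pool is carved out of a vmalloc
allocation and consumed from NMI context. With mappings synchronized at
creation, an NMI-time allocation, as in this hedged illustration (not code
from this commit), can no longer hit a missing PGD entry:

	#include <linux/genalloc.h>

	/* illustrative only: grab estatus memory while in NMI context */
	static void *ghes_nmi_alloc_example(struct gen_pool *pool, size_t len)
	{
		unsigned long vaddr = gen_pool_alloc(pool, len);

		/* the vmalloc mapping behind vaddr is already visible in all PGDs */
		return vaddr ? (void *)vaddr : NULL;
	}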
@@ -130,8 +130,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
 
-void vmalloc_sync_mappings(void);
-void vmalloc_sync_unmappings(void);
 
 /*
  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
@@ -519,7 +519,6 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	vmalloc_sync_mappings();
 	return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
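
register_die_notifier() did the sync so that notifier callbacks living in
vmalloc'd module memory could run safely from the die path. A typical
registration, for illustration only (hypothetical handler names, not from
this commit):

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	static int example_die_handler(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		/* runs on die/oops events; must not fault */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_die_nb = {
		.notifier_call = example_die_handler,
	};

	/* register_die_notifier(&example_die_nb); -- no pre-sync required anymore */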
@@ -8527,18 +8527,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	allocate_snapshot = false;
 #endif
 
-	/*
-	 * Because of some magic with the way alloc_percpu() works on
-	 * x86_64, we need to synchronize the pgd of all the tables,
-	 * otherwise the trace events that happen in x86_64 page fault
-	 * handlers can't cope with accessing the chance that a
-	 * alloc_percpu()'d memory might be touched in the page fault trace
-	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
-	 * calls in tracing, because something might get triggered within a
-	 * page fault trace event!
-	 */
-	vmalloc_sync_mappings();
-
 	return 0;
 }
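
The deleted comment describes a real constraint: alloc_percpu() chunks beyond
the first are vmalloc-backed on x86-64, and page-fault trace events may touch
them inside the fault handler, where a nested vmalloc fault must never happen.
A hedged sketch of the pattern (not from this commit):

	#include <linux/percpu.h>

	static int percpu_touch_example(void)
	{
		/* may be vmalloc-backed; its mapping is now synced at creation */
		unsigned long __percpu *hits = alloc_percpu(unsigned long);

		if (!hits)
			return -ENOMEM;

		this_cpu_inc(*hits);	/* safe even from a page-fault trace event */
		free_percpu(hits);
		return 0;
	}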
@@ -371,18 +371,6 @@ void vm_unmap_aliases(void)
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
-/*
- * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
- * chose not to have one.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	BUG();
@@ -1353,12 +1353,6 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	if (unlikely(valist == NULL))
 		return false;
 
-	/*
-	 * First make sure the mappings are removed from all page-tables
-	 * before they are freed.
-	 */
-	vmalloc_sync_unmappings();
-
 	/*
 	 * TODO: to calculate a flush range without looping.
 	 * The list can be up to lazy_max_pages() elements.
@@ -3089,21 +3083,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
-/*
- * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
- * not to have one.
- *
- * The purpose of this function is to make sure the vmalloc area
- * mappings are identical in all page-tables in the system.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
 static int f(pte_t *pte, unsigned long addr, void *data)
 {
 	pte_t ***p = data;