Commit ed1f324c authored by Christoph Hellwig, committed by Linus Torvalds

mm: remove map_vm_area

Switch all callers to map_kernel_range, which is symmetric to the unmap side
(as well as the _noflush versions).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-17-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 60bb4465
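
For orientation before the hunks, here is a minimal sketch of the caller-side conversion this patch performs. It is not part of the patch: example_map() is a hypothetical helper, and the error check simply mirrors the converted callers below, which treat a negative return from map_kernel_range() as failure.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Old interface (removed by this patch):
 *
 *	if (map_vm_area(area, PAGE_KERNEL, pages))
 *		goto fail;
 *
 * New interface: pass the start address and size explicitly, which makes
 * the call symmetric with unmap_kernel_range(addr, size).
 */
static int example_map(struct vm_struct *area, struct page **pages)
{
	if (map_kernel_range((unsigned long)area->addr,
			     get_vm_area_size(area), PAGE_KERNEL, pages) < 0)
		return -ENOMEM;	/* mapping failed */
	return 0;
}
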
@@ -213,7 +213,7 @@ Here are the routines, one by one:
 	there will be no entries in the cache for the kernel address
 	space for virtual addresses in the range 'start' to 'end-1'.
 
-	The first of these two routines is invoked after map_vm_area()
+	The first of these two routines is invoked after map_kernel_range()
 	has installed the page table entries.  The second is invoked
 	before unmap_kernel_range() deletes the page table entries.
@@ -168,11 +168,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
 
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page **pages);
 #ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 static inline void set_vm_flush_reset_perms(void *addr)
@@ -189,14 +189,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size,
 {
 	return size >> PAGE_SHIFT;
 }
+#define map_kernel_range map_kernel_range_noflush
 static inline void
 unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
 }
-static inline void
-unmap_kernel_range(unsigned long addr, unsigned long size)
-{
-}
+#define unmap_kernel_range unmap_kernel_range_noflush
 static inline void set_vm_flush_reset_perms(void *addr)
 {
 }
@@ -273,8 +273,8 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 	return 0;
 }
 
-static int map_kernel_range(unsigned long start, unsigned long size,
-			    pgprot_t prot, struct page **pages)
+int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
+		struct page **pages)
 {
 	int ret;
@@ -2028,16 +2028,6 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	flush_tlb_kernel_range(addr, end);
 }
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
-{
-	unsigned long addr = (unsigned long)area->addr;
-	int err;
-
-	err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);
-
-	return err > 0 ? 0 : err;
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -2409,7 +2399,8 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_vm_area(area, prot, pages)) {
+	if (map_kernel_range((unsigned long)area->addr, size, prot,
+			pages) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -2472,8 +2463,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	}
 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
-	if (map_vm_area(area, prot, pages))
+	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
+			prot, pages) < 0)
 		goto fail;
+
 	return area->addr;
 
 fail:
@@ -1138,7 +1138,9 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
 				struct page *pages[2], int off, int size)
 {
-	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
+	unsigned long addr = (unsigned long)area->vm->addr;
+
+	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
 	area->vm_addr = area->vm->addr;
 	return area->vm_addr + off;
 }
@@ -190,8 +190,7 @@ EXPORT_SYMBOL(ceph_compare_options);
  * kvmalloc() doesn't fall back to the vmalloc allocator unless flags are
  * compatible with (a superset of) GFP_KERNEL. This is because while the
  * actual pages are allocated with the specified flags, the page table pages
- * are always allocated with GFP_KERNEL. map_vm_area() doesn't even take
- * flags because GFP_KERNEL is hard-coded in {p4d,pud,pmd,pte}_alloc().
+ * are always allocated with GFP_KERNEL.
  *
  * ceph_kvmalloc() may be called with GFP_KERNEL, GFP_NOFS or GFP_NOIO.
  */