Commit d4efd79a authored by Christoph Hellwig, committed by Linus Torvalds

mm: remove the prot argument from vm_map_ram

This is always PAGE_KERNEL - for long-term mappings with other properties,
vmap should be used.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-19-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 855e57a1
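
For context, here is a minimal sketch (not part of the commit) of how a caller adapts to the changed interface. demo_map_pages() and demo_map_pages_ro() are hypothetical names used only for illustration; the vmap() variant follows the commit message's suggestion for mappings that need a protection other than PAGE_KERNEL.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: map an array of pages with the default protection. */
static void *demo_map_pages(struct page **pages, unsigned int count)
{
	/*
	 * Before this commit the call was
	 *	vm_map_ram(pages, count, -1, PAGE_KERNEL);
	 * The pgprot argument is now gone and PAGE_KERNEL is implied;
	 * -1 means "any NUMA node".
	 */
	void *addr = vm_map_ram(pages, count, -1);

	if (!addr)
		return NULL;

	/* The mapping is later released with vm_unmap_ram(addr, count). */
	return addr;
}

/*
 * Hypothetical caller that needs a non-default protection: per the commit
 * message, such long-term mappings should use vmap() instead.
 */
static void *demo_map_pages_ro(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL_RO);
}

As before, a vm_map_ram() mapping is torn down with vm_unmap_ram(), while a vmap() mapping is released with vunmap().
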
@@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct mock_dmabuf *mock = to_mock(dma_buf);
 
-	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+	return vm_map_ram(mock->pages, mock->npages, 0);
 }
 
 static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
...
@@ -309,8 +309,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 		if (buf->db_attach)
 			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
 		else
-			buf->vaddr = vm_map_ram(buf->pages,
-					buf->num_pages, -1, PAGE_KERNEL);
+			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
 	}
 
 	/* add offset in case userptr is not page-aligned */
...
@@ -107,8 +107,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
 		buf->vaddr = (__force void *)
 			ioremap(__pfn_to_phys(nums[0]), size + offset);
 	} else {
-		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
-				PAGE_KERNEL);
+		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
 	}
 
 	if (!buf->vaddr)
...
@@ -274,7 +274,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 
 	i = 0;
 	while (1) {
-		dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
+		dst = vm_map_ram(rq->out, nrpages_out, -1);
 
 		/* retry two more times (totally 3 times) */
 		if (dst || ++i >= 3)
...
@@ -477,7 +477,7 @@ _xfs_buf_map_pages(
 		nofs_flag = memalloc_nofs_save();
 		do {
 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-						-1, PAGE_KERNEL);
+						-1);
 			if (bp->b_addr)
 				break;
 			vm_unmap_aliases();
...
@@ -88,8 +88,7 @@ struct vmap_area {
  *	Highlevel APIs for driver use
  */
 extern void vm_unmap_ram(const void *mem, unsigned int count);
-extern void *vm_map_ram(struct page **pages, unsigned int count,
-				int node, pgprot_t prot);
+extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
 extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
...
@@ -351,7 +351,7 @@ void vunmap(const void *addr)
 }
 EXPORT_SYMBOL(vunmap);
 
-void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
+void *vm_map_ram(struct page **pages, unsigned int count, int node)
 {
 	BUG();
 	return NULL;
...
@@ -1835,7 +1835,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  *
  * Returns: a pointer to the address that has been mapped, or %NULL on failure
  */
-void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
+void *vm_map_ram(struct page **pages, unsigned int count, int node)
 {
 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
@@ -1859,7 +1859,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 
 	kasan_unpoison_vmalloc(mem, size);
 
-	if (map_kernel_range(addr, size, prot, pages) < 0) {
+	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
...