Commit 915ecc22 authored by Ira Weiny, committed by Linus Torvalds

drm: remove drm specific kmap_atomic code

kmap_atomic_prot() is now exported by all architectures.  Use this
function rather than open coding a driver specific kmap_atomic.

[arnd@arndb.de: include linux/highmem.h]
  Link: http://lkml.kernel.org/r/20200508220150.649044-1-arnd@arndb.de
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20200507150004.1423069-12-ira.weiny@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 20b271df
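For readers skimming the diff below: in the TTM copy helpers and the vmwgfx blit code the conversion amounts to calling the generic <linux/highmem.h> API directly instead of the removed TTM wrappers. A minimal sketch of the resulting pattern follows; copy_io_page_to_ttm() is a hypothetical name used only for illustration, not a function from this commit, while kmap_atomic_prot(), kunmap_atomic() and memcpy_fromio() are the existing kernel interfaces the patch switches to.

#include <linux/errno.h>	/* -ENOMEM */
#include <linux/highmem.h>	/* kmap_atomic_prot(), kunmap_atomic() */
#include <linux/io.h>		/* memcpy_fromio() */

/* Hypothetical helper sketching the post-patch call pattern. */
static int copy_io_page_to_ttm(struct page *d, const void __iomem *src,
			       pgprot_t prot)
{
	/* Before this commit: dst = ttm_kmap_atomic_prot(d, prot); */
	void *dst = kmap_atomic_prot(d, prot);

	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	/* Before this commit: ttm_kunmap_atomic_prot(dst, prot); */
	kunmap_atomic(dst);
	return 0;
}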
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 	return 0;
 }
 
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		return kmap_atomic(page);
-	else
-		return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
-	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-		kunmap_atomic(addr);
-	else
-		__ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = ttm_kmap_atomic_prot(d, prot);
+	dst = kmap_atomic_prot(d, prot);
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(dst, prot);
+	kunmap_atomic(dst);
 
 	return 0;
 }
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = ttm_kmap_atomic_prot(s, prot);
+	src = kmap_atomic_prot(s, prot);
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
 
-	ttm_kunmap_atomic_prot(src, prot);
+	kunmap_atomic(src);
 
 	return 0;
 }
@@ -27,6 +27,7 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include <linux/highmem.h>
 
 /*
  * Template that implements find_first_diff() for a generic
@@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
 
 		if (unmap_src) {
-			ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+			kunmap_atomic(d->src_addr);
 			d->src_addr = NULL;
 		}
 
 		if (unmap_dst) {
-			ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+			kunmap_atomic(d->dst_addr);
 			d->dst_addr = NULL;
 		}
 
@@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 				return -EINVAL;
 
 			d->dst_addr =
-				ttm_kmap_atomic_prot(d->dst_pages[dst_page],
-						     d->dst_prot);
+				kmap_atomic_prot(d->dst_pages[dst_page],
+						 d->dst_prot);
 			if (!d->dst_addr)
 				return -ENOMEM;
 
@@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 				return -EINVAL;
 
 			d->src_addr =
-				ttm_kmap_atomic_prot(d->src_pages[src_page],
-						     d->src_prot);
+				kmap_atomic_prot(d->src_pages[src_page],
+						 d->src_prot);
 			if (!d->src_addr)
 				return -ENOMEM;
 
@@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	}
 out:
 	if (d.src_addr)
-		ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+		kunmap_atomic(d.src_addr);
 	if (d.dst_addr)
-		ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+		kunmap_atomic(d.dst_addr);
 
 	return ret;
 }
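The removed ttm_kmap_atomic_prot() kerneldoc stressed that these atomic mappings must be correctly nested and unmapped in the reverse order of mapping, which is how the vmw_bo_cpu_blit_line() code above keeps a destination and a source page mapped at the same time. Below is a minimal sketch of that nesting rule with the generic API, assuming a hypothetical blit_one_page() helper rather than the driver's actual loop.

#include <linux/highmem.h>	/* kmap_atomic_prot(), kunmap_atomic() */
#include <linux/string.h>	/* memcpy() */

/* Hypothetical helper: map dst, then src; unmap src, then dst. */
static void blit_one_page(struct page *dst_page, struct page *src_page,
			  pgprot_t dst_prot, pgprot_t src_prot)
{
	void *dst = kmap_atomic_prot(dst_page, dst_prot);	/* mapped first */
	void *src = kmap_atomic_prot(src_page, src_prot);	/* mapped second */

	memcpy(dst, src, PAGE_SIZE);

	kunmap_atomic(src);	/* released first */
	kunmap_atomic(dst);	/* released last, reverse order of mapping */
}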
@@ -668,10 +668,6 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev);
 
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
-
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
-
 /**
  * ttm_bo_io
  *