Commit dbf7bff0 authored by Daniel Vetter

drm/i915: merge shmem_pread slow&fast-path

With the previous rewrite, they've become essentially identical.

v2: Simplify the page_do_bit17_swizzling logic as suggested by Chris
Wilson.
Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent e244a443
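The diff below drops the dedicated fast path and folds its optimistic copy into the former slow path. Both paths shared the same per-page chunking arithmetic: clamp each copy to the end of the current page, then advance the offset and user pointer. A minimal userspace sketch of just that arithmetic follows; PAGE_SIZE, copy_chunked, and the plain memcpy standing in for the shmem page lookup are illustrative, not the kernel code.

/*
 * Userspace model of the per-page chunking both pread paths share:
 * clamp each copy to the end of the current page, then advance.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

static void copy_chunked(char *dst, const char *src, size_t size, size_t offset)
{
	size_t remain = size;

	while (remain > 0) {
		size_t page_offset = offset_in_page(offset);
		size_t page_length = remain;

		/* Never copy across a page boundary in one step. */
		if (page_offset + remain > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* The kernel looks up the shmem page here; we just copy. */
		memcpy(dst, src + offset, page_length);

		remain -= page_length;
		dst += page_length;
		offset += page_length;
	}
}

int main(void)
{
	static char src[3 * PAGE_SIZE], dst[2 * PAGE_SIZE + 100];

	memset(src, 'x', sizeof(src));
	copy_chunked(dst, src, sizeof(dst), 300); /* unaligned start offset */
	printf("copied %zu bytes\n", sizeof(dst));
	return 0;
}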
@@ -239,66 +239,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file)
-{
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	ssize_t remain;
-	loff_t offset;
-	char __user *user_data;
-	int page_offset, page_length;
-
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	remain = args->size;
-
-	offset = args->offset;
-
-	while (remain > 0) {
-		struct page *page;
-		char *vaddr;
-		int ret;
-
-		/* Operation in this page
-		 *
-		 * page_offset = offset within page
-		 * page_length = bytes to copy for this page
-		 */
-		page_offset = offset_in_page(offset);
-		page_length = remain;
-		if ((page_offset + remain) > PAGE_SIZE)
-			page_length = PAGE_SIZE - page_offset;
-
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		vaddr = kmap_atomic(page);
-		ret = __copy_to_user_inatomic(user_data,
-					      vaddr + page_offset,
-					      page_length);
-		kunmap_atomic(vaddr);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-		if (ret)
-			return -EFAULT;
-
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
-	}
-
-	return 0;
-}
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
 			const char *gpu_vaddr, int gpu_offset,
@@ -351,14 +291,8 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
 static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file)
+i915_gem_shmem_pread(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pread *args,
+		     struct drm_file *file)
@@ -369,6 +303,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int hit_slowpath = 0;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -377,8 +312,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	offset = args->offset;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	while (remain > 0) {
 		struct page *page;
 		char *vaddr;
 		int ret;
@@ -402,6 +335,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
+		if (!page_do_bit17_swizzling) {
+			vaddr = kmap_atomic(page);
+			ret = __copy_to_user_inatomic(user_data,
+						      vaddr + shmem_page_offset,
+						      page_length);
+			kunmap_atomic(vaddr);
+			if (ret == 0)
+				goto next_page;
+		}
+
+		hit_slowpath = 1;
+		mutex_unlock(&dev->struct_mutex);
+
 		vaddr = kmap(page);
 		if (page_do_bit17_swizzling)
 			ret = __copy_to_user_swizzled(user_data,
@@ -413,6 +360,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 					      page_length);
 		kunmap(page);
 
+		mutex_lock(&dev->struct_mutex);
+next_page:
 		mark_page_accessed(page);
 		page_cache_release(page);
 
@@ -427,10 +376,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	}
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	/* Fixup: Kill any reinstated backing storage pages */
-	if (obj->madv == __I915_MADV_PURGED)
-		i915_gem_object_truncate(obj);
+	if (hit_slowpath) {
+		/* Fixup: Kill any reinstated backing storage pages */
+		if (obj->madv == __I915_MADV_PURGED)
+			i915_gem_object_truncate(obj);
+	}
 
 	return ret;
 }
@@ -486,11 +436,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = -EFAULT;
-	if (!i915_gem_object_needs_bit17_swizzle(obj))
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
-	if (ret == -EFAULT)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 out:
 	drm_gem_object_unreference(&obj->base);
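For reference, the merged loop's locking discipline can be modeled in isolation: attempt a no-fault copy while holding the lock; only on a fault record hit_slowpath, drop the lock for a copy that may fault, and re-take it, so the purge fixup at out: runs only when the lock was actually released. The toy sketch below illustrates that control flow under stated assumptions; lock(), unlock(), and try_copy_atomic() are hypothetical stand-ins, not kernel API.

/*
 * Toy model of the merged loop: an optimistic no-fault copy under
 * the lock, with a lock-dropping fallback tracked by hit_slowpath.
 */
#include <stdbool.h>
#include <stdio.h>

static void lock(void)   { puts("lock"); }
static void unlock(void) { puts("unlock"); }

/* Pretend the first chunk faults, as an unmapped user page might. */
static bool try_copy_atomic(int chunk) { return chunk != 0; }
static void copy_faultable(int chunk)  { printf("slow copy %d\n", chunk); }

int main(void)
{
	bool hit_slowpath = false;

	lock();
	for (int chunk = 0; chunk < 3; chunk++) {
		if (try_copy_atomic(chunk))
			continue;	/* fast path: the lock is never dropped */

		hit_slowpath = true;
		unlock();		/* a faultable copy must not hold the lock */
		copy_faultable(chunk);
		lock();
	}
	if (hit_slowpath)
		puts("run purge fixup");	/* needed only if the lock was dropped */
	unlock();
	return 0;
}

This mirrors why the commit guards the __I915_MADV_PURGED fixup with hit_slowpath: if every chunk took the atomic path, struct_mutex was never released and no backing pages could have been reinstated behind our back.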