Commit 692a576b authored by Daniel Vetter

drm/i915: don't call shmem_read_mapping unnecessarily

This speeds up pwrite and pread from ~120 µs to ~100 µs for
reading/writing 1mb on my snb (if the backing storage pages
are already pinned, of course).

v2: Chris Wilson pointed out a glaring page reference bug - I've
unconditionally dropped the reference. With that fixed (and the
associated reduction of dirt in dmesg) it's now even a notch faster.

v3: Unconditionally grab a page reference when dropping
dev->struct_mutex to simplify the code-flow.
Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 3ae53783
...@@ -301,6 +301,7 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -301,6 +301,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int obj_do_bit17_swizzling, page_do_bit17_swizzling; int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0; int hit_slowpath = 0;
int needs_clflush = 0; int needs_clflush = 0;
int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr; user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size; remain = args->size;
...@@ -335,10 +336,16 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -335,10 +336,16 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE) if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset; page_length = PAGE_SIZE - shmem_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); if (obj->pages) {
if (IS_ERR(page)) { page = obj->pages[offset >> PAGE_SHIFT];
ret = PTR_ERR(page); release_page = 0;
goto out; } else {
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
release_page = 1;
} }
page_do_bit17_swizzling = obj_do_bit17_swizzling && page_do_bit17_swizzling = obj_do_bit17_swizzling &&
...@@ -358,7 +365,7 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -358,7 +365,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
} }
hit_slowpath = 1; hit_slowpath = 1;
page_cache_get(page);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
vaddr = kmap(page); vaddr = kmap(page);
...@@ -377,9 +384,11 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -377,9 +384,11 @@ i915_gem_shmem_pread(struct drm_device *dev,
kunmap(page); kunmap(page);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
page_cache_release(page);
next_page: next_page:
mark_page_accessed(page); mark_page_accessed(page);
page_cache_release(page); if (release_page)
page_cache_release(page);
if (ret) { if (ret) {
ret = -EFAULT; ret = -EFAULT;
...@@ -660,6 +669,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -660,6 +669,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int shmem_page_offset, page_length, ret = 0; int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling; int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0; int hit_slowpath = 0;
int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr; user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size; remain = args->size;
...@@ -684,10 +694,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -684,10 +694,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE) if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset; page_length = PAGE_SIZE - shmem_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); if (obj->pages) {
if (IS_ERR(page)) { page = obj->pages[offset >> PAGE_SHIFT];
ret = PTR_ERR(page); release_page = 0;
goto out; } else {
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
release_page = 1;
} }
page_do_bit17_swizzling = obj_do_bit17_swizzling && page_do_bit17_swizzling = obj_do_bit17_swizzling &&
...@@ -705,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -705,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
} }
hit_slowpath = 1; hit_slowpath = 1;
page_cache_get(page);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
vaddr = kmap(page); vaddr = kmap(page);
...@@ -720,10 +736,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -720,10 +736,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
kunmap(page); kunmap(page);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
page_cache_release(page);
next_page: next_page:
set_page_dirty(page); set_page_dirty(page);
mark_page_accessed(page); mark_page_accessed(page);
page_cache_release(page); if (release_page)
page_cache_release(page);
if (ret) { if (ret) {
ret = -EFAULT; ret = -EFAULT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment