Commit e7e58eb5 authored by Daniel Vetter

drm/i915: mark pwrite/pread slowpaths with unlikely

Beside helping the compiler untangle this maze they double-up as
documentation for which parts of the code aren't performance-critical
but just around to keep old (but already dead-slow) userspace from
breaking.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-Off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 23c18c71
...@@ -298,7 +298,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, ...@@ -298,7 +298,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
char *vaddr; char *vaddr;
int ret; int ret;
if (page_do_bit17_swizzling) if (unlikely(page_do_bit17_swizzling))
return -EINVAL; return -EINVAL;
vaddr = kmap_atomic(page); vaddr = kmap_atomic(page);
...@@ -317,7 +317,7 @@ static void ...@@ -317,7 +317,7 @@ static void
shmem_clflush_swizzled_range(char *addr, unsigned long length, shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled) bool swizzled)
{ {
if (swizzled) { if (unlikely(swizzled)) {
unsigned long start = (unsigned long) addr; unsigned long start = (unsigned long) addr;
unsigned long end = (unsigned long) addr + length; unsigned long end = (unsigned long) addr + length;
...@@ -629,7 +629,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, ...@@ -629,7 +629,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
char *vaddr; char *vaddr;
int ret; int ret;
if (page_do_bit17_swizzling) if (unlikely(page_do_bit17_swizzling))
return -EINVAL; return -EINVAL;
vaddr = kmap_atomic(page); vaddr = kmap_atomic(page);
...@@ -660,7 +660,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, ...@@ -660,7 +660,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
int ret; int ret;
vaddr = kmap(page); vaddr = kmap(page);
if (needs_clflush_before || page_do_bit17_swizzling) if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
shmem_clflush_swizzled_range(vaddr + shmem_page_offset, shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
page_length, page_length,
page_do_bit17_swizzling); page_do_bit17_swizzling);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment