Commit ee286370 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Cache last obj->pages location for i915_gem_object_get_page()

The biggest user of i915_gem_object_get_page() is the relocation
processing during execbuffer. Typically userspace passes in a set of
relocations in sorted order. Sadly, we alternate between relocations
increasing from the start of the buffers, and relocations decreasing
from the end. However, the majority of consecutive lookups will still be
in the same page. We could cache the start of the last sg chain; however,
for most callers the entire sgl is inside a single chain, and so we see
no improvement from the extra layer of caching.
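
For illustration only, here is a minimal user-space sketch of the same
cached-cursor idea (invented names, no kernel types): lookups at ascending
page indices resume from the last scatterlist position instead of rewalking
from the start, and a descending index simply resets the cursor.

    /* Sketch of the cached lookup in i915_gem_object_get_page();
     * struct segment is a hypothetical stand-in for one sg entry.
     * Callers must keep n below the total page count. */
    #include <stdio.h>

    struct segment { int npages; };     /* one "sg" entry */

    struct cursor {
            const struct segment *seg;  /* last entry visited */
            int first;                  /* index of first page in *seg */
    };

    static const struct segment *
    lookup(struct cursor *c, const struct segment *sgl, int n)
    {
            if (n < c->first) {         /* went backwards: restart */
                    c->seg = sgl;
                    c->first = 0;
            }
            while (c->first + c->seg->npages <= n) /* walk forward to n */
                    c->first += c->seg++->npages;
            return c->seg;              /* n - c->first is the page within it */
    }

    int main(void)
    {
            const struct segment sgl[] = { {4}, {2}, {8} }; /* 14 pages */
            struct cursor c = { sgl, 0 };
            int n;

            /* ascending lookups are amortised O(1); only a jump back
             * to a lower index pays for a full rewind */
            for (n = 0; n < 14; n++)
                    printf("page %2d -> segment %d\n", n,
                           (int)(lookup(&c, sgl, n) - sgl));
            return 0;
    }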

v2: Avoid the double increment inside unlikely()

References: https://bugs.freedesktop.org/show_bug.cgi?id=88308
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent f9fc42f4
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1986,6 +1986,10 @@ struct drm_i915_gem_object {
 	struct sg_table *pages;
 	int pages_pin_count;
+	struct get_page {
+		struct scatterlist *sg;
+		int last;
+	} get_page;
 
 	/* prime dma-buf support */
 	void *dma_buf_vmapping;
@@ -2656,15 +2660,32 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 				    int *needs_clflush);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
-{
-	struct sg_page_iter sg_iter;
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
-		return sg_page_iter_page(&sg_iter);
+static inline int __sg_page_count(struct scatterlist *sg)
+{
+	return sg->length >> PAGE_SHIFT;
+}
 
-	return NULL;
+static inline struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
+		return NULL;
+
+	if (n < obj->get_page.last) {
+		obj->get_page.sg = obj->pages->sgl;
+		obj->get_page.last = 0;
+	}
+
+	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+		if (unlikely(sg_is_chain(obj->get_page.sg)))
+			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+	}
+
+	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
 }
 
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages == NULL);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2178,6 +2178,10 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 		return ret;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+	obj->get_page.sg = obj->pages->sgl;
+	obj->get_page.last = 0;
+
 	return 0;
 }
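
With the cursor initialised here, any caller that touches pages in roughly
ascending order gets the amortised-O(1) walk. A hypothetical caller, purely
for illustration (read_object and its behaviour are not part of this commit):

    /* hypothetical: copy an object out page by page; since n only
     * increases, each i915_gem_object_get_page() call resumes from
     * the cached sg entry rather than rewalking from obj->pages->sgl */
    static void read_object(struct drm_i915_gem_object *obj, char *dst)
    {
            int n, count = obj->base.size >> PAGE_SHIFT;

            for (n = 0; n < count; n++) {
                    void *src = kmap_atomic(i915_gem_object_get_page(obj, n));

                    memcpy(dst + n * PAGE_SIZE, src, PAGE_SIZE);
                    kunmap_atomic(src);
            }
    }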