Commit 03ade511 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Inline check required for object syncing prior to execbuf

This trims a little overhead from the common case of not needing to
synchronize between rings.

v2: execlists is special and likes to duplicate code.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent b4716185
...@@ -889,6 +889,7 @@ static int ...@@ -889,6 +889,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas) struct list_head *vmas)
{ {
const unsigned other_rings = ~intel_ring_flag(ring);
struct i915_vma *vma; struct i915_vma *vma;
uint32_t flush_domains = 0; uint32_t flush_domains = 0;
bool flush_chipset = false; bool flush_chipset = false;
...@@ -896,9 +897,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, ...@@ -896,9 +897,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret) if (obj->active & other_rings) {
return ret; ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false); flush_chipset |= i915_gem_clflush_object(obj, false);
......
...@@ -628,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, ...@@ -628,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct list_head *vmas) struct list_head *vmas)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
const unsigned other_rings = ~intel_ring_flag(ring);
struct i915_vma *vma; struct i915_vma *vma;
uint32_t flush_domains = 0; uint32_t flush_domains = 0;
bool flush_chipset = false; bool flush_chipset = false;
...@@ -636,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, ...@@ -636,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring); if (obj->active & other_rings) {
if (ret) ret = i915_gem_object_sync(obj, ring);
return ret; if (ret)
return ret;
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false); flush_chipset |= i915_gem_clflush_object(obj, false);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment