Commit 2889caa9 authored by Chris Wilson

drm/i915: Eliminate lots of iterations over the execobjects array

The major scaling bottleneck in execbuffer is the processing of the
execobjects. Creating an auxiliary list is inefficient when compared to
using the execobject array we already have allocated.

Reservation is then split into phases. As we look up the VMA, we try to
bind it back into its active location. Only if that fails do we add it
to the unbound list for phase 2. In phase 2, we try to fit all the
objects that could not be kept in their previous location, with a
fallback to retrying all objects and evicting the VM in case of severe
fragmentation. (This is the same as before, except that phase 1 is now
done inline with looking up the VMA to avoid an iteration over the
execobject array; in the ideal case, we eliminate the separate
reservation phase.) During the reservation phase, we only evict from the
VM between passes (rather than, as currently, while trying to fit every
new VMA). In testing with Unreal Engine's Atlantis demo, which stresses
the eviction logic on gen7 class hardware, this speeds up the framerate
by a factor of 2.
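
To illustrate the flow, here is a simplified, self-contained sketch of the
phased reservation. All names in it (sketch_vma, try_pin_in_place(),
try_pin_anywhere(), evict_whole_vm(), reserve_execobjects()) are hypothetical
stand-ins, not the driver's real structures or API:

#include <errno.h>

/* Hypothetical, simplified sketch of phased reservation (not the real API). */
struct sketch_vma {
	int fits_old_location;		/* can the previous placement be reused? */
	struct sketch_vma *next_unbound;/* singly linked phase-2 work list */
};

static int try_pin_in_place(struct sketch_vma *vma)
{
	return vma->fits_old_location ? 0 : -ENOSPC;
}

static int try_pin_anywhere(struct sketch_vma *vma)
{
	(void)vma;
	return 0;			/* pretend placement succeeded */
}

static void evict_whole_vm(void)
{
	/* last-ditch defragmentation between passes */
}

static int reserve_execobjects(struct sketch_vma *vmas, int count)
{
	struct sketch_vma *unbound = NULL;
	struct sketch_vma *vma;
	int i, pass;

	/* Phase 1, done inline with the VMA lookup: rebind into the old
	 * location and only queue the failures for phase 2.
	 */
	for (i = 0; i < count; i++) {
		if (try_pin_in_place(&vmas[i]) == 0)
			continue;	/* ideal case: no phase 2 needed */
		vmas[i].next_unbound = unbound;
		unbound = &vmas[i];
	}

	/* Phase 2: place the leftovers, evicting only between whole passes
	 * rather than on every individual object that fails to fit.
	 */
	for (pass = 0; pass < 2; pass++) {
		int err = 0;

		for (vma = unbound; vma; vma = vma->next_unbound) {
			err = try_pin_anywhere(vma);
			if (err)
				break;
		}
		if (!err)
			return 0;
		if (pass == 0)
			evict_whole_vm();	/* then retry the leftovers */
	}
	return -ENOSPC;
}

The point of the sketch is that eviction happens at most once per pass over
the unbound list, not once per object that fails to fit.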

The second loop amalgamation is between move_to_gpu and move_to_active.
As we always submit the request, even if incomplete, we can use the
current request to track the active VMAs as we perform the flushes and
synchronisation required.
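
A rough sketch of the amalgamated loop, again with hypothetical names
(fake_request, fake_obj, flush_and_sync(), mark_active() are illustrative
only):

#include <stddef.h>

/* Hypothetical types standing in for the request and object structures. */
struct fake_request { int id; };
struct fake_obj { int needs_flush; };

static void flush_and_sync(struct fake_obj *obj) { obj->needs_flush = 0; }
static void mark_active(struct fake_obj *obj, struct fake_request *rq)
{
	(void)obj; (void)rq;	/* record obj as busy on rq */
}

/* One pass instead of a move_to_gpu loop followed by a move_to_active loop:
 * because the request is always submitted (even if we fail partway), the
 * same iteration can flush/synchronise each object and record it as active
 * against that request.
 */
static void move_to_gpu_and_active(struct fake_obj *objs, size_t count,
				   struct fake_request *rq)
{
	size_t i;

	for (i = 0; i < count; i++) {
		flush_and_sync(&objs[i]);	/* was: move_to_gpu */
		mark_active(&objs[i], rq);	/* was: move_to_active */
	}
}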

The next big advancement is to avoid copying back to the user any
execobjects and relocations that are not changed.
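
A user-space sketch of that idea (fake_exec_entry is hypothetical and a plain
memcpy() stands in for the copy back to user memory):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical, cut-down execobject entry (illustrative only). */
struct fake_exec_entry {
	uint64_t offset;	/* GPU address the object ended up at */
};

/* Copy back only the entries whose offset actually changed, instead of
 * unconditionally writing the whole array back to the user.
 */
static void copy_back_changed(struct fake_exec_entry *user,
			      const struct fake_exec_entry *kernel,
			      size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (user[i].offset == kernel[i].offset)
			continue;	/* unchanged: skip the write-back */
		memcpy(&user[i], &kernel[i], sizeof(user[i]));
	}
}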

v2: Add a Theory of Operation spiel.
v3: Fall back to slow relocations in preparation for flushing userptrs.
v4: Document struct members, factor out eb_validate_vma(), add a few
more comments to explain some magic and hide other magic behind macros.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 071750e5
@@ -3581,7 +3581,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					  struct drm_mm_node *node,
					  unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_vm(struct i915_address_space *vm);
 
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
...
@@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
+static int ggtt_flush(struct drm_i915_private *i915)
+{
+	int err;
+
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
+	 */
+	err = i915_gem_switch_to_kernel_context(i915);
+	if (err)
+		return err;
+
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static bool
 mark_free(struct drm_mm_scan *scan,
 	  struct i915_vma *vma,
@@ -175,19 +198,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 	}
 
-	/* Not everything in the GGTT is tracked via vma (otherwise we
-	 * could evict as required with minimal stalling) so we are forced
-	 * to idle the GPU and explicitly retire outstanding requests in
-	 * the hopes that we can then remove contexts and the like only
-	 * bound by their active reference.
-	 */
-	ret = i915_gem_switch_to_kernel_context(dev_priv);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_wait_for_idle(dev_priv,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
+	ret = ggtt_flush(dev_priv);
 	if (ret)
 		return ret;
 
@@ -337,10 +348,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
- * @do_idle: Boolean directing whether to idle first.
  *
- * This function evicts all idles vmas from a vm. If all unpinned vmas should be
- * evicted the @do_idle needs to be set to true.
+ * This function evicts all vmas from a vm.
  *
  * This is used by the execbuf code as a last-ditch effort to defragment the
  * address space.
@@ -348,37 +357,50 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+int i915_gem_evict_vm(struct i915_address_space *vm)
 {
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL
+	}, **phase;
+	struct list_head eviction_list;
 	struct i915_vma *vma, *next;
 	int ret;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
 	trace_i915_gem_evict_vm(vm);
 
-	if (do_idle) {
-		struct drm_i915_private *dev_priv = vm->i915;
-
-		if (i915_is_ggtt(vm)) {
-			ret = i915_gem_switch_to_kernel_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED);
-		if (ret)
-			return ret;
-
-		WARN_ON(!list_empty(&vm->active_list));
+	/* Switch back to the default context in order to unpin
+	 * the existing context objects. However, such objects only
+	 * pin themselves inside the global GTT and performing the
+	 * switch otherwise is ineffective.
+	 */
+	if (i915_is_ggtt(vm)) {
+		ret = ggtt_flush(vm->i915);
+		if (ret)
+			return ret;
 	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (!i915_vma_is_pinned(vma))
-			WARN_ON(i915_vma_unbind(vma));
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link) {
+			if (i915_vma_is_pinned(vma))
+				continue;
 
-	return 0;
+			__i915_vma_pin(vma);
+			list_add(&vma->evict_link, &eviction_list);
+		}
+	} while (*++phase);
+
+	ret = 0;
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
+		__i915_vma_unpin(vma);
+		if (ret == 0)
+			ret = i915_vma_unbind(vma);
+	}
+	return ret;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
...
This diff is collapsed.
@@ -463,7 +463,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
-		return -E2BIG;
+		return -ENOSPC;
 	}
 
 	ret = i915_gem_object_pin_pages(obj);
...
@@ -103,6 +103,7 @@ struct i915_vma {
 
 	/** This vma's place in the execbuf reservation list */
 	struct list_head exec_link;
+	struct list_head reloc_link;
 
 	/** This vma's place in the eviction list */
 	struct list_head evict_link;
...
@@ -304,7 +304,7 @@ static int igt_evict_vm(void *arg)
 		goto cleanup;
 
 	/* Everything is pinned, nothing should happen */
-	err = i915_gem_evict_vm(&ggtt->base, false);
+	err = i915_gem_evict_vm(&ggtt->base);
 	if (err) {
 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
 		       err);
@@ -313,7 +313,7 @@ static int igt_evict_vm(void *arg)
 
 	unpin_ggtt(i915);
 
-	err = i915_gem_evict_vm(&ggtt->base, false);
+	err = i915_gem_evict_vm(&ggtt->base);
 	if (err) {
 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
 		       err);
...
@@ -224,14 +224,6 @@ static bool assert_pin_valid(const struct i915_vma *vma,
 	return true;
 }
 
-__maybe_unused
-static bool assert_pin_e2big(const struct i915_vma *vma,
-			     const struct pin_mode *mode,
-			     int result)
-{
-	return result == -E2BIG;
-}
-
 __maybe_unused
 static bool assert_pin_enospc(const struct i915_vma *vma,
 			      const struct pin_mode *mode,
@@ -255,7 +247,6 @@ static int igt_vma_pin1(void *arg)
 #define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
 #define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
 #define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
-#define TOOBIG(sz, fl) __INVALID(sz, fl, assert_pin_e2big, E2BIG)
 #define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
 		VALID(0, PIN_GLOBAL),
 		VALID(0, PIN_GLOBAL | PIN_MAPPABLE),
@@ -276,11 +267,11 @@ static int igt_vma_pin1(void *arg)
 		VALID(8192, PIN_GLOBAL),
 		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
 		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
-		TOOBIG(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
 		VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
 		VALID(i915->ggtt.base.total, PIN_GLOBAL),
-		TOOBIG(i915->ggtt.base.total + 4096, PIN_GLOBAL),
-		TOOBIG(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
+		NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
 		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
 		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
 		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
@@ -300,7 +291,6 @@ static int igt_vma_pin1(void *arg)
 #endif
 		{ },
 #undef NOSPACE
-#undef TOOBIG
 #undef INVALID
 #undef __INVALID
 #undef VALID
...