Commit 8f2a1057 authored by Chris Wilson

drm/i915: Explicitly pin the logical context for execbuf

In order to separate the reservation phase of building a request from
its emission phase, we need to pull some of the request alloc activities
from deep inside i915_request to the surface, GEM_EXECBUFFER.

v2: Be frivolous, use a local drm_i915_private.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190425050143.811-1-chris@chris-wilson.co.uk
parent 79ffac85
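
For orientation, here is a condensed sketch (abridged, not buildable on its own) of the setup and unwind ordering in i915_gem_do_execbuffer() after this patch; it uses only helpers and labels that appear in the diff below:

	intel_gt_pm_get(eb.i915);		/* GT wakeref held for the submission */

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_select_engine(&eb, file, args);	/* now also pins eb.context and reports a wedged GPU */
	if (unlikely(err))
		goto err_unlock;

	err = eb_wait_for_ring(&eb);	/* can use eb.context->ring directly, it is already pinned */
	if (unlikely(err))
		goto err_engine;

	/* ... relocations, batch pinning ... */

	eb.request = i915_request_create(eb.context);	/* request built from the pinned intel_context */

	/* ... unwind mirrors the setup ... */
err_engine:
	eb_unpin_context(&eb);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_gt_pm_put(eb.i915);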
@@ -34,6 +34,8 @@
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
+#include "gt/intel_gt_pm.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_trace.h"
@@ -236,7 +238,8 @@ struct i915_execbuffer {
 	unsigned int *flags;
 
 	struct intel_engine_cs *engine; /** engine to queue the request to */
-	struct i915_gem_context *ctx; /** context for building the request */
+	struct intel_context *context; /* logical state for the request */
+	struct i915_gem_context *gem_context; /** caller's context */
 	struct i915_address_space *vm; /** GTT and vma for the request */
 
 	struct i915_request *request; /** our request to build */
@@ -738,7 +741,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	if (unlikely(!ctx))
 		return -ENOENT;
 
-	eb->ctx = ctx;
+	eb->gem_context = ctx;
 	if (ctx->ppgtt) {
 		eb->vm = &ctx->ppgtt->vm;
 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -784,7 +787,6 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
 
 static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 {
-	const struct intel_context *ce;
 	struct i915_request *rq;
 	int ret = 0;
 
@@ -794,11 +796,7 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 	 * keeping all of their resources pinned.
 	 */
 
-	ce = intel_context_lookup(eb->ctx, eb->engine);
-	if (!ce || !ce->ring) /* first use, assume empty! */
-		return 0;
-
-	rq = __eb_wait_for_ring(ce->ring);
+	rq = __eb_wait_for_ring(eb->context->ring);
 	if (rq) {
 		mutex_unlock(&eb->i915->drm.struct_mutex);
 
@@ -817,15 +815,15 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
-	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
 	struct drm_i915_gem_object *obj;
 	unsigned int i, batch;
 	int err;
 
-	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
+	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
 		return -ENOENT;
 
-	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
 		return -EIO;
 
 	INIT_LIST_HEAD(&eb->relocs);
@@ -870,8 +868,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		if (!vma->open_count++)
 			i915_vma_reopen(vma);
 		list_add(&lut->obj_link, &obj->lut_list);
-		list_add(&lut->ctx_link, &eb->ctx->handles_list);
-		lut->ctx = eb->ctx;
+		list_add(&lut->ctx_link, &eb->gem_context->handles_list);
+		lut->ctx = eb->gem_context;
 		lut->handle = handle;
 
 add_vma:
@@ -1227,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_unmap;
 
-	rq = i915_request_alloc(eb->engine, eb->ctx);
+	rq = i915_request_create(eb->context);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
@@ -2088,31 +2086,65 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 	[I915_EXEC_VEBOX]	= VECS0
 };
 
-static struct intel_engine_cs *
-eb_select_engine(struct drm_i915_private *dev_priv,
+static int eb_pin_context(struct i915_execbuffer *eb,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	/*
+	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged.
+	 */
+	err = i915_terminally_wedged(eb->i915);
+	if (err)
+		return err;
+
+	/*
+	 * Pinning the contexts may generate requests in order to acquire
+	 * GGTT space, so do this first before we reserve a seqno for
+	 * ourselves.
+	 */
+	ce = intel_context_pin(eb->gem_context, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	eb->engine = engine;
+	eb->context = ce;
+	return 0;
+}
+
+static void eb_unpin_context(struct i915_execbuffer *eb)
+{
+	intel_context_unpin(eb->context);
+}
+
+static int
+eb_select_engine(struct i915_execbuffer *eb,
 		 struct drm_file *file,
 		 struct drm_i915_gem_execbuffer2 *args)
 {
+	struct drm_i915_private *i915 = eb->i915;
 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
 	struct intel_engine_cs *engine;
 
 	if (user_ring_id > I915_USER_RINGS) {
 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
-		return NULL;
+		return -EINVAL;
 	}
 
 	if ((user_ring_id != I915_EXEC_BSD) &&
 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
 			  "bsd dispatch flags: %d\n", (int)(args->flags));
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
+	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
-			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
+			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
 			   bsd_idx <= I915_EXEC_BSD_RING2) {
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -2120,20 +2152,20 @@ eb_select_engine(struct drm_i915_private *dev_priv,
 		} else {
 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
 				  bsd_idx);
-			return NULL;
+			return -EINVAL;
 		}
 
-		engine = dev_priv->engine[_VCS(bsd_idx)];
+		engine = i915->engine[_VCS(bsd_idx)];
 	} else {
-		engine = dev_priv->engine[user_ring_map[user_ring_id]];
+		engine = i915->engine[user_ring_map[user_ring_id]];
 	}
 
 	if (!engine) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
-		return NULL;
+		return -EINVAL;
 	}
 
-	return engine;
+	return eb_pin_context(eb, engine);
 }
 
 static void
@@ -2275,7 +2307,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct sync_file *out_fence = NULL;
-	intel_wakeref_t wakeref;
 	int out_fence_fd = -1;
 	int err;
 
@@ -2335,12 +2366,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	eb.engine = eb_select_engine(eb.i915, file, args);
-	if (!eb.engine) {
-		err = -EINVAL;
-		goto err_engine;
-	}
-
 	/*
 	 * Take a local wakeref for preparing to dispatch the execbuf as
 	 * we expect to access the hardware fairly frequently in the
@@ -2348,16 +2373,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	wakeref = intel_runtime_pm_get(eb.i915);
+	intel_gt_pm_get(eb.i915);
 
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
 		goto err_rpm;
 
-	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+	err = eb_select_engine(&eb, file, args);
 	if (unlikely(err))
 		goto err_unlock;
 
+	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+	if (unlikely(err))
+		goto err_engine;
+
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -2441,7 +2470,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	GEM_BUG_ON(eb.reloc_cache.rq);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	eb.request = i915_request_alloc(eb.engine, eb.ctx);
+	eb.request = i915_request_create(eb.context);
 	if (IS_ERR(eb.request)) {
 		err = PTR_ERR(eb.request);
 		goto err_batch_unpin;
@@ -2479,8 +2508,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb);
 err_request:
-	i915_request_add(eb.request);
 	add_to_client(eb.request, file);
+	i915_request_add(eb.request);
 
 	if (fences)
 		signal_fence_array(&eb, fences);
@@ -2502,12 +2531,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
+err_engine:
+	eb_unpin_context(&eb);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_runtime_pm_put(eb.i915, wakeref);
-err_engine:
-	i915_gem_context_put(eb.ctx);
+	intel_gt_pm_put(eb.i915);
+	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
 err_out_fence:
...
@@ -785,7 +785,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_context *ce;
 	struct i915_request *rq;
-	int ret;
 
 	/*
 	 * Preempt contexts are reserved for exclusive use to inject a
@@ -794,14 +793,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	GEM_BUG_ON(ctx == i915->preempt_context);
 
-	/*
-	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-	 * EIO if the GPU is already wedged.
-	 */
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ERR_PTR(ret);
-
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
...