Commit 8e4ba491 authored by Maarten Lankhorst, committed by Joonas Lahtinen

drm/i915: Parse command buffer earlier in eb_relocate(slow)

We want to introduce backoff logic, but we need to lock the
pool object as well for command parsing. Because of this, we
will need backoff logic for the engine pool object too, so move
the batch validation up slightly into eb_lookup_vmas, and the
actual command parsing into a separate function that can be
called from both the execbuf relocation fast path and slow path.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200819140904.1708856-8-maarten.lankhorst@linux.intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 1af343cd
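
Before the diff, a minimal sketch of the control flow this patch produces, reconstructed from the hunks below. The function names eb_lookup_vmas(), eb_relocate_parse(), eb_relocate_parse_slow() and eb_parse() are the ones used by the patch; the bodies are elided, and do_relocations_fast()/do_relocations_slow() are hypothetical placeholders for the existing reservation/relocation work, not functions in the driver.

/*
 * Sketch only, not the real implementation: after this patch, batch
 * validation happens in eb_lookup_vmas() and eb_parse() runs from both
 * the relocation fast path and the slow path, no longer from
 * i915_gem_do_execbuffer().
 */
static int eb_parse(struct i915_execbuffer *eb);

static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
{
	int err;

	err = do_relocations_slow(eb);	/* placeholder: copy and apply user relocations */
	if (err)
		return err;

	/* as last step, parse the command buffer */
	return eb_parse(eb);
}

static int eb_relocate_parse(struct i915_execbuffer *eb)
{
	int err;

	err = eb_lookup_vmas(eb);	/* now also validates the batch object */
	if (err)
		return err;

	err = do_relocations_fast(eb);	/* placeholder: reserve + fast-path relocation */
	if (err)
		return eb_relocate_parse_slow(eb);

	return eb_parse(eb);
}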
@@ -296,6 +296,8 @@ struct i915_execbuffer {
 	unsigned long num_fences;
 };
 
+static int eb_parse(struct i915_execbuffer *eb);
+
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -843,6 +845,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
+	struct drm_i915_private *i915 = eb->i915;
 	unsigned int batch = eb_batch_index(eb);
 	unsigned int i;
 	int err = 0;
@@ -856,18 +859,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		vma = eb_lookup_vma(eb, eb->exec[i].handle);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			break;
+			goto err;
 		}
 
 		err = eb_validate_vma(eb, &eb->exec[i], vma);
 		if (unlikely(err)) {
 			i915_vma_put(vma);
-			break;
+			goto err;
 		}
 
 		eb_add_vma(eb, i, batch, vma);
 	}
 
+	if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
+		drm_dbg(&i915->drm,
+			"Attempting to use self-modifying batch buffer\n");
+		return -EINVAL;
+	}
+
+	if (range_overflows_t(u64,
+			      eb->batch_start_offset, eb->batch_len,
+			      eb->batch->vma->size)) {
+		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
+		return -EINVAL;
+	}
+
+	if (eb->batch_len == 0)
+		eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
+
+	return 0;
+
+err:
 	eb->vma[i].vma = NULL;
 	return err;
 }
@@ -1802,7 +1824,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
 	return 0;
 }
 
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
+static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
 {
 	bool have_copy = false;
 	struct eb_vma *ev;
@@ -1867,6 +1889,11 @@ static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
 	if (err)
 		goto err;
 
+	/* as last step, parse the command buffer */
+	err = eb_parse(eb);
+	if (err)
+		goto err;
+
 	/*
 	 * Leave the user relocations as are, this is the painfully slow path,
 	 * and we want to avoid the complication of dropping the lock whilst
@@ -1899,7 +1926,7 @@ static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
 	return err;
 }
 
-static int eb_relocate(struct i915_execbuffer *eb)
+static int eb_relocate_parse(struct i915_execbuffer *eb)
 {
 	int err;
 
@@ -1924,10 +1951,10 @@ static int eb_relocate(struct i915_execbuffer *eb)
 		}
 
 		if (err)
-			return eb_relocate_slow(eb);
+			return eb_relocate_parse_slow(eb);
 	}
 
-	return 0;
+	return eb_parse(eb);
 }
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -3045,7 +3072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_context;
 
-	err = eb_relocate(&eb);
+	err = eb_relocate_parse(&eb);
 	if (err) {
 		/*
 		 * If the user expects the execobject.offset and
@@ -3058,33 +3085,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
-	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
-		drm_dbg(&i915->drm,
-			"Attempting to use self-modifying batch buffer\n");
-		err = -EINVAL;
-		goto err_vma;
-	}
-
-	if (range_overflows_t(u64,
-			      eb.batch_start_offset, eb.batch_len,
-			      eb.batch->vma->size)) {
-		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
-		err = -EINVAL;
-		goto err_vma;
-	}
-
-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
-
-	err = eb_parse(&eb);
-	if (err)
-		goto err_vma;
-
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
-	batch = eb.batch->vma;
 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
 		struct i915_vma *vma;
 
@@ -3098,13 +3102,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		 * fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
+		vma = i915_gem_object_ggtt_pin(eb.batch->vma->obj, NULL, 0, 0, 0);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_parse;
 		}
 
 		batch = vma;
+	} else {
+		batch = eb.batch->vma;
 	}
 
 	/* All GPU relocation batches must be submitted prior to the user rq */