Commit b291ce0a authored by Chris Wilson

drm/i915/gem: Purge the sudden reappearance of i915_gem_object_pin()

This died many years ago as we now use i915_vma first and foremost.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191115170835.1367869-1-chris@chris-wilson.co.uk
parent 3c1fe1eb
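
The patch converts the last caller of i915_gem_object_pin() to the i915_vma-first idiom: look up the VMA binding an (object, address space) pair with i915_vma_instance(), then pin that VMA with i915_vma_pin(). A minimal sketch of that idiom, mirroring what the new shadow_batch_pin() body below does; the helper name pin_obj_in_vm() is illustrative only and not part of the i915 API:

/*
 * Illustrative sketch (not part of the patch): the i915_vma-first
 * pinning idiom that replaces i915_gem_object_pin().
 */
static struct i915_vma *
pin_obj_in_vm(struct drm_i915_gem_object *obj,
              struct i915_address_space *vm,
              u64 flags)
{
        struct i915_vma *vma;
        int err;

        /* Look up (or create) the binding of @obj into @vm... */
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma))
                return vma;

        /* ...and pin that vma directly; no object-level wrapper needed. */
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
                return ERR_PTR(err);

        return vma;
}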
@@ -1995,28 +1995,38 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 static struct i915_vma *
 shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
 {
-        struct drm_i915_private *dev_priv = eb->i915;
-        struct i915_vma * const vma = *eb->vma;
         struct i915_address_space *vm;
+        struct i915_vma *vma;
         u64 flags;
+        int err;
 
         /*
          * PPGTT backed shadow buffers must be mapped RO, to prevent
          * post-scan tampering
          */
-        if (CMDPARSER_USES_GGTT(dev_priv)) {
+        if (CMDPARSER_USES_GGTT(eb->i915)) {
+                vm = &eb->engine->gt->ggtt->vm;
                 flags = PIN_GLOBAL;
-                vm = &dev_priv->ggtt.vm;
-        } else if (vma->vm->has_read_only) {
-                flags = PIN_USER;
-                vm = vma->vm;
-                i915_gem_object_set_readonly(obj);
         } else {
-                DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
-                return ERR_PTR(-EINVAL);
+                vm = eb->context->vm;
+                if (!vm->has_read_only) {
+                        DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+                        return ERR_PTR(-EINVAL);
+                }
+
+                i915_gem_object_set_readonly(obj);
+                flags = PIN_USER;
         }
 
-        return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+        vma = i915_vma_instance(obj, vm, NULL);
+        if (IS_ERR(vma))
+                return vma;
+
+        err = i915_vma_pin(vma, 0, 0, flags);
+        if (err)
+                return ERR_PTR(err);
+
+        return vma;
 }
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
@@ -2058,7 +2068,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
          * For PPGTT backing however, we have no choice but to forcibly
          * reject unsafe buffers
          */
-        if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+        if (i915_vma_is_ggtt(vma) && err == -EACCES)
                 /* Execute original buffer non-secure */
                 vma = NULL;
         else
@@ -2075,7 +2085,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 
         eb->batch_start_offset = 0;
         eb->batch = vma;
-        if (CMDPARSER_USES_GGTT(eb->i915))
+        if (i915_vma_is_ggtt(vma))
                 eb->batch_flags |= I915_DISPATCH_SECURE;
 
         /* eb->batch_len unchanged */
...
@@ -1842,14 +1842,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                            unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                    struct i915_address_space *vm,
-                    const struct i915_ggtt_view *view,
-                    u64 size,
-                    u64 alignment,
-                    u64 flags);
-
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
 static inline int __must_check
...
@@ -891,22 +891,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                          u64 alignment,
                          u64 flags)
 {
-        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-        struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
-        return i915_gem_object_pin(obj, vm, view, size, alignment,
-                                   flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                    struct i915_address_space *vm,
-                    const struct i915_ggtt_view *view,
-                    u64 size,
-                    u64 alignment,
-                    u64 flags)
-{
-        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
+        struct i915_ggtt *ggtt = &i915->ggtt;
         struct i915_vma *vma;
         int ret;
 
@@ -915,17 +901,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
         if (flags & PIN_MAPPABLE &&
             (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
-                /* If the required space is larger than the available
+                /*
+                 * If the required space is larger than the available
                  * aperture, we will not able to find a slot for the
                  * object and unbinding the object now will be in
                  * vain. Worse, doing so may cause us to ping-pong
                  * the object in and out of the Global GTT and
                  * waste a lot of cycles under the mutex.
                  */
-                if (obj->base.size > dev_priv->ggtt.mappable_end)
+                if (obj->base.size > ggtt->mappable_end)
                         return ERR_PTR(-E2BIG);
 
-                /* If NONBLOCK is set the caller is optimistically
+                /*
+                 * If NONBLOCK is set the caller is optimistically
                  * trying to cache the full object within the mappable
                  * aperture, and *must* have a fallback in place for
                  * situations where we cannot bind the object. We
@@ -941,11 +929,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                  * we could try to minimise harm to others.
                  */
                 if (flags & PIN_NONBLOCK &&
-                    obj->base.size > dev_priv->ggtt.mappable_end / 2)
+                    obj->base.size > ggtt->mappable_end / 2)
                         return ERR_PTR(-ENOSPC);
         }
 
-        vma = i915_vma_instance(obj, vm, view);
+        vma = i915_vma_instance(obj, &ggtt->vm, view);
         if (IS_ERR(vma))
                 return vma;
 
@@ -955,7 +943,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                 return ERR_PTR(-ENOSPC);
 
                         if (flags & PIN_MAPPABLE &&
-                            vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+                            vma->fence_size > ggtt->mappable_end / 2)
                                 return ERR_PTR(-ENOSPC);
                 }
 
@@ -965,14 +953,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
         }
 
         if (vma->fence && !i915_gem_object_is_tiled(obj)) {
-                mutex_lock(&vma->vm->mutex);
+                mutex_lock(&ggtt->vm.mutex);
                 ret = i915_vma_revoke_fence(vma);
-                mutex_unlock(&vma->vm->mutex);
+                mutex_unlock(&ggtt->vm.mutex);
                 if (ret)
                         return ERR_PTR(ret);
         }
 
-        ret = i915_vma_pin(vma, size, alignment, flags);
+        ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
         if (ret)
                 return ERR_PTR(ret);
...