Commit 7ae19400 authored by Chris Wilson

drm/i915: Defer removing fence register tracking to rpm wakeup

Currently, we may simultaneously release the fence register from both
fence_update() and i915_gem_restore_fences(). This is dangerous, so
defer the bookkeeping entirely to i915_gem_restore_fences() when the
device is asleep.
Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190208153708.20023-1-chris@chris-wilson.co.uk
parent 73a116be
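
For orientation before the diff, here is a minimal standalone userspace model of the scheme this patch moves to: the tracking pointer is claimed with an atomic exchange so only one path ever releases the old binding, the register write is skipped while the device is asleep, and the resume path rewrites every register from the tracked state. This is not the i915 code; struct fence_reg, device_awake, hw_write() and the *_model() functions are hypothetical stand-ins, and the real driver's tiling checks, mmap revocation and error handling are omitted.

/*
 * Minimal standalone model (NOT the i915 code) of the pattern adopted by
 * this patch: claim the tracking pointer with an atomic exchange so only
 * one path releases it, skip the register write while asleep, and let the
 * resume path rewrite every register from the tracked state.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vma { int id; };

struct fence_reg {
	_Atomic(struct vma *) vma;	/* tracked owner; read concurrently on resume */
};

static bool device_awake;		/* stand-in for intel_runtime_pm_get_if_in_use() */

static void hw_write(struct fence_reg *reg, const struct vma *vma)
{
	/* stand-in for fence_write(): program the register (or clear it) */
	printf("fence register <- vma %d\n", vma ? vma->id : 0);
}

/* Rough analogue of fence_update(): detach the old owner, attach @vma. */
static int fence_update_model(struct fence_reg *reg, struct vma *vma)
{
	/* Whoever receives the non-NULL pointer owns releasing the old binding. */
	struct vma *old = atomic_exchange(&reg->vma, (struct vma *)NULL);

	if (old && old != vma) {
		/* release the old bookkeeping here (mmap revocation etc. in i915) */
	}

	if (!device_awake) {
		/*
		 * Asleep: only removal is allowed; the register write is
		 * deferred to the resume path below.
		 */
		return vma ? -1 : 0;
	}

	atomic_store(&reg->vma, vma);
	hw_write(reg, vma);
	return 0;
}

/* Rough analogue of i915_gem_restore_fences(), run on runtime resume. */
static void restore_fences_model(struct fence_reg *regs, size_t n)
{
	for (size_t i = 0; i < n; i++)
		hw_write(&regs[i], atomic_load(&regs[i].vma));
}

int main(void)
{
	struct fence_reg regs[2] = { { NULL }, { NULL } };
	struct vma a = { .id = 1 };

	device_awake = true;
	fence_update_model(&regs[0], &a);	/* written immediately */

	device_awake = false;
	fence_update_model(&regs[0], NULL);	/* tracking cleared, write deferred */

	device_awake = true;
	restore_fences_model(regs, 2);		/* resume rewrites both registers */
	return 0;
}

In this model, the atomic exchange is what prevents the update path and the resume path from both tearing down the same tracking entry, which is the simultaneous release the commit message describes.
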
@@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
if (vma) {
@@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
return ret;
}
if (fence->vma) {
struct i915_vma *old = fence->vma;
old = xchg(&fence->vma, NULL);
if (old) {
ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
if (ret)
if (ret) {
fence->vma = old;
return ret;
}
i915_vma_flush_writes(old);
}
if (fence->vma && fence->vma != vma) {
/* Ensure that all userspace CPU access is completed before
/*
* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
GEM_BUG_ON(fence->vma->fence != fence);
i915_vma_revoke_mmap(fence->vma);
fence->vma->fence = NULL;
fence->vma = NULL;
if (old != vma) {
GEM_BUG_ON(old->fence != fence);
i915_vma_revoke_mmap(old);
old->fence = NULL;
}
list_move(&fence->link, &fence->i915->mm.fence_list);
}
/* We only need to update the register itself if the device is awake.
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*
* This only works for removing the fence register, on acquisition
* the caller must hold the rpm wakeref. The fence register must
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
if (wakeref) {
fence_write(fence, vma);
intel_runtime_pm_put(fence->i915, wakeref);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
}
if (vma) {
if (fence->vma != vma) {
vma->fence = fence;
fence_write(fence, vma);
fence->vma = vma;
}
if (vma) {
vma->fence = fence;
list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
intel_runtime_pm_put(fence->i915, wakeref);
return 0;
}
@@ -473,9 +480,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -483,18 +491,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
}
fence_write(reg, vma);
reg->vma = vma;
}
rcu_read_unlock();
}
/**