Commit 0c159ffe authored by Chris Wilson

drm/i915/gem: Defer obj->base.resv fini until RCU callback

Since reservation_object_fini() does an immediate free, rather than
kfree_rcu as normal, we have to delay the release until after the RCU
grace period has elapsed (i.e. from the rcu cleanup callback) so that we
can rely on the RCU protected access to the fences while the object is a
zombie.

i915_gem_busy_ioctl relies on having an RCU barrier to protect the
reservation in order to avoid having to take a reference and strong
memory barriers.

v2: Order is important; only release after putting the pages!

Fixes: c03467ba ("drm/i915/gem: Free pages before rcu-freeing the object")
Testcase: igt/gem_busy/close-race
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190703180601.10950-1-chris@chris-wilson.co.uk
parent 21de5a9e
@@ -152,6 +152,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 		container_of(head, typeof(*obj), rcu);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	reservation_object_fini(&obj->base._resv);
 	i915_gem_object_free(obj);
 	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -187,9 +188,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
-		if (obj->ops->release)
-			obj->ops->release(obj);
 		atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
@@ -198,7 +196,10 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
-		drm_gem_object_release(&obj->base);
+		drm_gem_free_mmap_offset(&obj->base);
+
+		if (obj->ops->release)
+			obj->ops->release(obj);
 		/* But keep the pointer alive for RCU-protected lookups */
 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
......
@@ -133,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 		drm_pci_free(obj->base.dev, obj->phys_handle);
 }
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
-	.release = i915_gem_object_release_phys,
 };
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
......
@@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 	return 0;
 }
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+	fput(obj->base.filp);
+}
 const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 	.writeback = shmem_writeback,
 	.pwrite = shmem_pwrite,
+	.release = shmem_release,
 };
 static int create_shmem(struct drm_i915_private *i915,
......
@@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(!stolen);
-	__i915_gem_object_unpin_pages(obj);
 	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment