Commit f0cd5182 authored by Chris Wilson

drm/i915: Use lockless object free

Having moved the locked phase of freeing an object to a separate worker,
we can now declare to the core that we only need the unlocked variant of
driver->gem_free_object, and can use the simple unreference internally.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-20-chris@chris-wilson.co.uk
parent fbbd37b3
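
For context, the pattern the commit message relies on looks roughly like the sketch below. This is only an illustration of the deferred-free idea, not the i915 implementation: the names example_obj, example_free_worker and example_gem_free_object are invented for this sketch. Once the struct_mutex-protected phase of teardown runs from a worker, the final unreference can be issued from any context, which is what allows the driver to register gem_free_object_unlocked and retire the _unlocked put helper in the diff below.

/*
 * Minimal sketch of the deferred-free pattern (illustrative names only,
 * not the real i915 code).
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <drm/drm_gem.h>

struct example_obj {
	struct drm_gem_object base;
	struct work_struct free_work;	/* runs the locked phase of freeing */
};

/* Locked phase: runs in process context, so taking struct_mutex is safe. */
static void example_free_worker(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(work, struct example_obj, free_work);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* ... unbind VMAs, release backing pages, etc. ... */
	mutex_unlock(&dev->struct_mutex);

	drm_gem_object_release(&obj->base);
	kfree(obj);
}

/*
 * Called via driver->gem_free_object_unlocked when the last reference is
 * dropped; may run without struct_mutex, so only queue the real work.
 * (free_work is assumed to have been set up with INIT_WORK() at object
 * creation time.)
 */
static void example_gem_free_object(struct drm_gem_object *gem)
{
	struct example_obj *obj = container_of(gem, struct example_obj, base);

	schedule_work(&obj->free_work);
}
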
@@ -2575,7 +2575,7 @@ static struct drm_driver driver = {
 	.set_busid = drm_pci_set_busid,
 	.gem_close_object = i915_gem_close_object,
-	.gem_free_object = i915_gem_free_object,
+	.gem_free_object_unlocked = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
...
@@ -2404,19 +2404,12 @@ __attribute__((nonnull))
 static inline void
 i915_gem_object_put(struct drm_i915_gem_object *obj)
 {
-	drm_gem_object_unreference(&obj->base);
+	__drm_gem_object_unreference(&obj->base);
 }
 __deprecated
 extern void drm_gem_object_unreference(struct drm_gem_object *);
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
-	drm_gem_object_unreference_unlocked(&obj->base);
-}
 __deprecated
 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
@@ -2511,7 +2504,6 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
 static inline void i915_vma_put(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 	i915_gem_object_put(vma->obj);
 }
...
@@ -617,7 +617,7 @@ i915_gem_create(struct drm_file *file,
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (ret)
 		return ret;
@@ -1111,7 +1111,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	i915_gem_object_unpin_pages(obj);
 out:
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
@@ -1444,7 +1444,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	i915_gem_object_unpin_pages(obj);
 err:
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
@@ -1520,7 +1520,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 				   MAX_SCHEDULE_TIMEOUT,
 				   to_rps_client(file));
 	if (err)
-		goto out_unlocked;
+		goto out;
 	/* Flush and acquire obj->pages so that we are coherent through
 	 * direct access in memory with previous cached writes through
@@ -1532,11 +1532,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 */
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
-		goto out_unlocked;
+		goto out;
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
-		goto out_pages;
+		goto out_unpin;
 	if (read_domains & I915_GEM_DOMAIN_GTT)
 		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1551,10 +1551,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
-out_pages:
+out_unpin:
 	i915_gem_object_unpin_pages(obj);
-out_unlocked:
-	i915_gem_object_put_unlocked(obj);
+out:
+	i915_gem_object_put(obj);
 	return err;
 }
@@ -1585,7 +1585,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 		}
 	}
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return err;
 }
@@ -1631,7 +1631,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	 * pages from.
 	 */
 	if (!obj->base.filp) {
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 		return -EINVAL;
 	}
@@ -1643,7 +1643,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		struct vm_area_struct *vma;
 		if (down_write_killable(&mm->mmap_sem)) {
-			i915_gem_object_put_unlocked(obj);
+			i915_gem_object_put(obj);
 			return -EINTR;
 		}
 		vma = find_vma(mm, addr);
@@ -1657,7 +1657,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		/* This may race, but that's ok, it only gets set */
 		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
@@ -2105,7 +2105,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	if (ret == 0)
 		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
@@ -2932,7 +2932,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		args->timeout_ns = 0;
 	}
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
...
@@ -201,7 +201,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	if (!i915_tiling_ok(dev,
 			    args->stride, obj->base.size, args->tiling_mode)) {
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 		return -EINVAL;
 	}
...
@@ -546,7 +546,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	put_task_struct(work->task);
 	kfree(work);
 }
@@ -806,7 +806,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	if (ret)
 		return ret;
...
@@ -11051,7 +11051,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 	return fb;
 }
@@ -12360,7 +12360,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->primary->fb = old_fb;
 	update_state_fb(crtc->primary);
-	i915_gem_object_put_unlocked(obj);
+	i915_gem_object_put(obj);
 	drm_framebuffer_unreference(work->old_fb);
 	spin_lock_irq(&dev->event_lock);
@@ -15934,7 +15934,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb))
-		i915_gem_object_put_unlocked(obj);
+		i915_gem_object_put(obj);
 	return fb;
 }
...
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	drm_modeset_unlock_all(dev);
-	i915_gem_object_put_unlocked(new_bo);
+	i915_gem_object_put(new_bo);
 out_free:
 	kfree(params);
@@ -1466,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 	 * hardware should be off already */
 	WARN_ON(dev_priv->overlay->active);
-	i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+	i915_gem_object_put(dev_priv->overlay->reg_bo);
 	kfree(dev_priv->overlay);
 }
...
@@ -5878,7 +5878,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 	if (WARN_ON(!dev_priv->vlv_pctx))
 		return;
-	i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+	i915_gem_object_put(dev_priv->vlv_pctx);
 	dev_priv->vlv_pctx = NULL;
 }
...