Commit 6edbd6ab authored by Christian König

dma-buf: rename and cleanup dma_resv_get_excl v3

When the comment needs to state explicitly that this
doesn't get a reference to the object, then the function
is rather badly named.

Rename the function and use rcu_dereference_check() so that
it can be used from both RCU and lock-protected critical
sections.

v2: improve kerneldoc as suggested by Daniel
v3: use dma_resv_excl_fence as function name
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-4-christian.koenig@amd.com
parent 415f6767
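
To illustrate what the switch to rcu_dereference_check() buys, the renamed helper can be called from either kind of critical section. The functions below are a hypothetical caller-side sketch, not part of this patch:

        #include <linux/dma-resv.h>
        #include <linux/dma-fence.h>

        /* Hypothetical reader: peek at the exclusive fence under the RCU
         * read-side lock only.  No reference is taken, so the fence must not
         * be used after rcu_read_unlock() without dma_fence_get_rcu(). */
        static void example_describe_excl(struct dma_resv *resv)
        {
                struct dma_fence *fence;

                rcu_read_lock();
                fence = dma_resv_excl_fence(resv);
                if (fence)
                        pr_info("exclusive fence context %llu\n", fence->context);
                rcu_read_unlock();
        }

        /* Hypothetical writer-side path: the same helper with the reservation
         * lock held, where taking a long-term reference is safe. */
        static struct dma_fence *example_get_excl(struct dma_resv *resv)
        {
                dma_resv_assert_held(resv);
                return dma_fence_get(dma_resv_excl_fence(resv));
        }
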
@@ -234,7 +234,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
                 shared_count = fobj->shared_count;
         else
                 shared_count = 0;
-        fence_excl = rcu_dereference(resv->fence_excl);
+        fence_excl = dma_resv_excl_fence(resv);
         if (read_seqcount_retry(&resv->seq, seq)) {
                 rcu_read_unlock();
                 goto retry;
@@ -1382,8 +1382,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
                            buf_obj->name ?: "");
 
                 robj = buf_obj->resv;
-                fence = rcu_dereference_protected(robj->fence_excl,
-                                                  dma_resv_held(robj));
+                fence = dma_resv_excl_fence(robj);
                 if (fence)
                         seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
                                    fence->ops->get_driver_name(fence),
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
  */
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-        struct dma_fence *old_fence = dma_resv_get_excl(obj);
+        struct dma_fence *old_fence = dma_resv_excl_fence(obj);
         struct dma_resv_list *old;
         u32 i = 0;
 
@@ -380,7 +380,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
         rcu_read_unlock();
 
         src_list = dma_resv_get_list(dst);
-        old = dma_resv_get_excl(dst);
+        old = dma_resv_excl_fence(dst);
 
         write_seqcount_begin(&dst->seq);
         /* write_seqcount_begin provides the necessary memory barrier */
@@ -428,7 +428,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
                 rcu_read_lock();
                 seq = read_seqcount_begin(&obj->seq);
 
-                fence_excl = rcu_dereference(obj->fence_excl);
+                fence_excl = dma_resv_excl_fence(obj);
                 if (fence_excl && !dma_fence_get_rcu(fence_excl))
                         goto unlock;
 
@@ -523,7 +523,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
         rcu_read_lock();
         i = -1;
 
-        fence = rcu_dereference(obj->fence_excl);
+        fence = dma_resv_excl_fence(obj);
         if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                 if (!dma_fence_get_rcu(fence))
                         goto unlock_retry;
@@ -645,7 +645,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
         }
 
         if (!shared_count) {
-                struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+                struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
 
                 if (fence_excl) {
                         ret = dma_resv_test_signaled_single(fence_excl);
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
         if (!amdgpu_vm_ready(vm))
                 goto out_unlock;
 
-        fence = dma_resv_get_excl(bo->tbo.base.resv);
+        fence = dma_resv_excl_fence(bo->tbo.base.resv);
         if (fence) {
                 amdgpu_bo_fence(bo, fence, true);
                 fence = NULL;
@@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                 return -EINVAL;
 
         /* always sync to the exclusive fence */
-        f = dma_resv_get_excl(resv);
+        f = dma_resv_excl_fence(resv);
         r = amdgpu_sync_fence(sync, f);
 
         flist = dma_resv_get_list(resv);
@@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
                 }
         }
 
-        fence = rcu_dereference(robj->fence_excl);
+        fence = dma_resv_excl_fence(robj);
         if (fence)
                 etnaviv_gem_describe_fence(fence, "Exclusive", m);
         rcu_read_unlock();
@@ -113,8 +113,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         seq = raw_read_seqcount(&obj->base.resv->seq);
 
         /* Translate the exclusive fence to the READ *and* WRITE engine */
-        args->busy =
-                busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+        args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
         /* Translate shared fences to READ set of engines */
         list = rcu_dereference(obj->base.resv->fence);
@@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
         fobj = dma_resv_get_list(obj->resv);
         if (!fobj || (fobj->shared_count == 0)) {
-                fence = dma_resv_get_excl(obj->resv);
+                fence = dma_resv_excl_fence(obj->resv);
                 /* don't need to wait on our own fences, since ring is fifo */
                 if (fence && (fence->context != fctx->context)) {
                         ret = dma_fence_wait(fence, true);
@@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                 }
         }
 
-        fence = rcu_dereference(robj->fence_excl);
+        fence = dma_resv_excl_fence(robj);
         if (fence)
                 describe_fence(fence, "Exclusive", m);
         rcu_read_unlock();
@@ -951,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct drm_device *dev = drm->dev;
-        struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+        struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
 
         nv10_bo_put_tile_region(dev, *old_tile, fence);
         *old_tile = new_tile;
@@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
         }
 
         fobj = dma_resv_get_list(resv);
-        fence = dma_resv_get_excl(resv);
+        fence = dma_resv_excl_fence(resv);
 
         if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                 struct nouveau_channel *prev = NULL;
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
                 DRM_ERROR("failed to pin new rbo buffer before flip\n");
                 goto cleanup;
         }
-        work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+        work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
         radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
         radeon_bo_unreserve(new_rbo);
 
@@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
         int r = 0;
 
         /* always sync to the exclusive fence */
-        f = dma_resv_get_excl(resv);
+        f = dma_resv_excl_fence(resv);
         fence = f ? to_radeon_fence(f) : NULL;
         if (fence && fence->rdev == rdev)
                 radeon_sync_fence(sync, fence);
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                 return -EINVAL;
         }
 
-        f = dma_resv_get_excl(bo->tbo.base.resv);
+        f = dma_resv_excl_fence(bo->tbo.base.resv);
         if (f) {
                 r = radeon_fence_wait((struct radeon_fence *)f, false);
                 if (r) {
@@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 
         rcu_read_lock();
         fobj = rcu_dereference(resv->fence);
-        fence = rcu_dereference(resv->fence_excl);
+        fence = dma_resv_excl_fence(resv);
         if (fence && !fence->ops->signaled)
                 dma_fence_enable_sw_signaling(fence);
 
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                 if (bo->moving)
                         dma_fence_put(bo->moving);
                 bo->moving = dma_fence_get
-                        (dma_resv_get_excl(bo->base.resv));
+                        (dma_resv_excl_fence(bo->base.resv));
         }
 
         return 0;
@@ -226,22 +226,20 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_exclusive - return the object's exclusive fence
  * @obj: the reservation object
  *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). Caller must either hold the objects
+ * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
+ * or one of the variants of each
  *
  * RETURNS
  * The exclusive fence or NULL
  */
 static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_excl_fence(struct dma_resv *obj)
 {
-        return rcu_dereference_protected(obj->fence_excl,
-                                         dma_resv_held(obj));
+        return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
 }
 
 /**
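
The reason rcu_dereference_check() covers both cases is that, under CONFIG_PROVE_RCU, the supplied lockdep condition is checked in addition to rcu_read_lock_held(), whereas rcu_dereference_protected() checks only the given condition. Roughly, as a simplified sketch (not the exact macro from include/linux/rcupdate.h):

        /* dma_resv_excl_fence() only triggers a lockdep splat when *neither*
         * the reservation lock nor an RCU read-side critical section is held: */
        RCU_LOCKDEP_WARN(!(dma_resv_held(obj) || rcu_read_lock_held()),
                         "suspicious rcu_dereference_check() usage");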