Commit 440d0f12 authored by Christian König

dma-buf: add dma_fence_chain_alloc/free v3

Add a common allocation helper. Cleaning up the mix of kzalloc/kmalloc
and some unused code in the selftest.

v2: polish kernel doc a bit
v3: polish kernel doc even a bit more
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210611120301.10595-3-christian.koenig@amd.com
parent 9c61e789
...@@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void) ...@@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void)
return &f->base; return &f->base;
} }
static inline struct mock_chain {
struct dma_fence_chain base;
} *to_mock_chain(struct dma_fence *f) {
return container_of(f, struct mock_chain, base.base);
}
static struct dma_fence *mock_chain(struct dma_fence *prev, static struct dma_fence *mock_chain(struct dma_fence *prev,
struct dma_fence *fence, struct dma_fence *fence,
u64 seqno) u64 seqno)
{ {
struct mock_chain *f; struct dma_fence_chain *f;
f = kmalloc(sizeof(*f), GFP_KERNEL); f = dma_fence_chain_alloc();
if (!f) if (!f)
return NULL; return NULL;
dma_fence_chain_init(&f->base, dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence),
dma_fence_get(prev),
dma_fence_get(fence),
seqno); seqno);
return &f->base.base; return &f->base;
} }
static int sanitycheck(void *arg) static int sanitycheck(void *arg)
......
...@@ -1109,7 +1109,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p ...@@ -1109,7 +1109,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
dep->chain = NULL; dep->chain = NULL;
if (syncobj_deps[i].point) { if (syncobj_deps[i].point) {
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); dep->chain = dma_fence_chain_alloc();
if (!dep->chain) if (!dep->chain)
return -ENOMEM; return -ENOMEM;
} }
...@@ -1117,7 +1117,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p ...@@ -1117,7 +1117,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
dep->syncobj = drm_syncobj_find(p->filp, dep->syncobj = drm_syncobj_find(p->filp,
syncobj_deps[i].handle); syncobj_deps[i].handle);
if (!dep->syncobj) { if (!dep->syncobj) {
kfree(dep->chain); dma_fence_chain_free(dep->chain);
return -EINVAL; return -EINVAL;
} }
dep->point = syncobj_deps[i].point; dep->point = syncobj_deps[i].point;
......
...@@ -861,7 +861,7 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, ...@@ -861,7 +861,7 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
&fence); &fence);
if (ret) if (ret)
goto err; goto err;
chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); chain = dma_fence_chain_alloc();
if (!chain) { if (!chain) {
ret = -ENOMEM; ret = -ENOMEM;
goto err1; goto err1;
...@@ -1402,10 +1402,10 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, ...@@ -1402,10 +1402,10 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
goto err_points; goto err_points;
} }
for (i = 0; i < args->count_handles; i++) { for (i = 0; i < args->count_handles; i++) {
chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); chains[i] = dma_fence_chain_alloc();
if (!chains[i]) { if (!chains[i]) {
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
kfree(chains[j]); dma_fence_chain_free(chains[j]);
ret = -ENOMEM; ret = -ENOMEM;
goto err_chains; goto err_chains;
} }
......
...@@ -2983,7 +2983,7 @@ __free_fence_array(struct eb_fence *fences, unsigned int n) ...@@ -2983,7 +2983,7 @@ __free_fence_array(struct eb_fence *fences, unsigned int n)
while (n--) { while (n--) {
drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
dma_fence_put(fences[n].dma_fence); dma_fence_put(fences[n].dma_fence);
kfree(fences[n].chain_fence); dma_fence_chain_free(fences[n].chain_fence);
} }
kvfree(fences); kvfree(fences);
} }
...@@ -3097,9 +3097,7 @@ add_timeline_fence_array(struct i915_execbuffer *eb, ...@@ -3097,9 +3097,7 @@ add_timeline_fence_array(struct i915_execbuffer *eb,
return -EINVAL; return -EINVAL;
} }
f->chain_fence = f->chain_fence = dma_fence_chain_alloc();
kmalloc(sizeof(*f->chain_fence),
GFP_KERNEL);
if (!f->chain_fence) { if (!f->chain_fence) {
drm_syncobj_put(syncobj); drm_syncobj_put(syncobj);
dma_fence_put(fence); dma_fence_put(fence);
......
...@@ -586,9 +586,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, ...@@ -586,9 +586,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
break; break;
} }
post_deps[i].chain = post_deps[i].chain = dma_fence_chain_alloc();
kmalloc(sizeof(*post_deps[i].chain),
GFP_KERNEL);
if (!post_deps[i].chain) { if (!post_deps[i].chain) {
ret = -ENOMEM; ret = -ENOMEM;
break; break;
...@@ -605,7 +603,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, ...@@ -605,7 +603,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
if (ret) { if (ret) {
for (j = 0; j <= i; ++j) { for (j = 0; j <= i; ++j) {
kfree(post_deps[j].chain); dma_fence_chain_free(post_deps[j].chain);
if (post_deps[j].syncobj) if (post_deps[j].syncobj)
drm_syncobj_put(post_deps[j].syncobj); drm_syncobj_put(post_deps[j].syncobj);
} }
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/dma-fence.h> #include <linux/dma-fence.h>
#include <linux/irq_work.h> #include <linux/irq_work.h>
#include <linux/slab.h>
/** /**
* struct dma_fence_chain - fence to represent an node of a fence chain * struct dma_fence_chain - fence to represent an node of a fence chain
...@@ -66,6 +67,30 @@ to_dma_fence_chain(struct dma_fence *fence) ...@@ -66,6 +67,30 @@ to_dma_fence_chain(struct dma_fence *fence)
return container_of(fence, struct dma_fence_chain, base); return container_of(fence, struct dma_fence_chain, base);
} }
/**
 * dma_fence_chain_alloc - allocate an uninitialized fence chain node
 *
 * Allocates a struct dma_fence_chain with GFP_KERNEL. The returned object is
 * not initialized; the caller must pass it to dma_fence_chain_init() before
 * use, or release it again with dma_fence_chain_free().
 *
 * Returns a new struct dma_fence_chain object or NULL on failure.
 */
static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
{
	return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
}
/**
 * dma_fence_chain_free - free an unused fence chain node
 * @chain: chain node to free
 *
 * Frees up an allocated but not used struct dma_fence_chain object. This
 * doesn't need an RCU grace period since the fence was never initialized nor
 * published. After dma_fence_chain_init() has been called the fence must be
 * released by calling dma_fence_put(), and not through this function.
 */
static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
{
	kfree(chain);
}
/** /**
* dma_fence_chain_for_each - iterate over all fences in chain * dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence * @iter: current fence
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment