Commit 6b51b02a authored by Christian König

dma-buf: fix and rework dma_buf_poll v7

Daniel pointed me towards this function and there are multiple obvious problems
in the implementation.

First of all, the retry loop is not working as intended. In general, the
retry only makes sense if you grab the reference first and then check the
sequence values.
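
Just to illustrate the ordering (hypothetical helper, not part of this patch,
which gets rid of the seqcount retry entirely): the reference has to be taken
before the sequence count is re-checked, otherwise a changed fence slips
through between the check and the get.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical example of a get-then-recheck retry loop. */
static struct dma_fence *example_get_excl_fence(struct dma_resv *resv)
{
	struct dma_fence *fence;
	unsigned int seq;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();
	/* grab the reference first ... */
	fence = dma_fence_get_rcu_safe(&resv->fence_excl);
	rcu_read_unlock();
	/* ... and only then check whether the fences changed under us */
	if (read_seqcount_retry(&resv->seq, seq)) {
		dma_fence_put(fence);
		goto retry;
	}

	return fence;	/* may be NULL, caller drops it with dma_fence_put() */
}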

Then we should always also wait for the exclusive fence.
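
For reference, this is how the reworked EPOLLOUT path below takes the
exclusive fence into account when no shared fence accepted the callback
(excerpt from the patch, comment added here for illustration):

	if (events & EPOLLOUT) {
		/* writers must wait for all fences, including the exclusive
		 * one when no shared fence took the callback
		 */
		if (!dma_buf_poll_shared(resv, dcb) &&
		    !dma_buf_poll_excl(resv, dcb))
			/* No callback queued, wake up any other waiters */
			dma_buf_poll_cb(NULL, &dcb->cb);
		else
			events &= ~EPOLLOUT;
	}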

It's also good practice to keep the reference around when installing callbacks
to fences you don't own.
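
That is the pattern the new dma_buf_poll_shared()/dma_buf_poll_excl() helpers
below follow: take a reference before dma_fence_add_callback(), hand it over
to the callback on success, and drop it again if the fence has already
signaled (condensed from the patch):

	dma_fence_get(fence);
	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
	if (!r)
		return true;	/* callback armed, it now owns the reference */
	dma_fence_put(fence);	/* fence already signaled, drop it again */

dma_buf_poll_cb() correspondingly drops the reference with dma_fence_put()
once the fence signals.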

And last, the whole implementation was unnecessarily complex and rather hard
to understand, which could lead to unexpected behavior of the IOCTL.

Fix all this by reworking the implementation from scratch, dropping the
whole RCU approach and taking the lock instead.
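
A minimal sketch of what that means for walking the fences (hypothetical
helper, assuming the dma_resv API used by the patch; the real code is in
dma_buf_poll() below):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical example: iterate the fences under the reservation lock, so
 * rcu_dereference_protected() with dma_resv_held() is sufficient and no
 * seqcount retry loop is needed.
 */
static unsigned int example_count_unsignaled(struct dma_resv *resv)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned int i, count = 0;

	dma_resv_lock(resv, NULL);

	fobj = dma_resv_shared_list(resv);
	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));
		if (!dma_fence_is_signaled(fence))
			count++;
	}

	fence = dma_resv_excl_fence(resv);
	if (fence && !dma_fence_is_signaled(fence))
		count++;

	dma_resv_unlock(resv);
	return count;
}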

Only mildly tested and needs a thoughtful review of the code.

Pushing through drm-misc-next to avoid merge conflicts and give the code
another round of testing.

v2: fix the reference counting as well
v3: keep the excl fence handling as is for stable
v4: back to testing all fences, drop RCU
v5: handle in and out separately
v6: add missing clear of events
v7: change coding style as suggested by Michel, drop unused variables
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Michel Dänzer <mdaenzer@redhat.com>
CC: stable@vger.kernel.org
Link: https://patchwork.freedesktop.org/patch/msgid/20210720131110.88512-1-christian.koenig@amd.com
parent 241ffeb0
@@ -74,7 +74,7 @@ static void dma_buf_release(struct dentry *dentry)
 	 * If you hit this BUG() it means someone dropped their ref to the
 	 * dma-buf while still having pending operation to the buffer.
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dma_buf_stats_teardown(dmabuf);
 	dmabuf->ops->release(dmabuf);
@@ -206,16 +206,55 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 	wake_up_locked_poll(dcb->poll, dcb->active);
 	dcb->active = 0;
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+	dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+				struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+	struct dma_fence *fence;
+	int i, r;
+
+	if (!fobj)
+		return false;
+
+	for (i = 0; i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						  dma_resv_held(resv));
+		dma_fence_get(fence);
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		if (!r)
+			return true;
+		dma_fence_put(fence);
+	}
+
+	return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+			      struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_fence *fence = dma_resv_excl_fence(resv);
+	int r;
+
+	if (!fence)
+		return false;
+
+	dma_fence_get(fence);
+	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+	if (!r)
+		return true;
+	dma_fence_put(fence);
+
+	return false;
 }
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -229,101 +268,50 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-retry:
-	seq = read_seqcount_begin(&resv->seq);
-	rcu_read_lock();
+	dma_resv_lock(resv, NULL);
 
-	fobj = rcu_dereference(resv->fence);
-	if (fobj)
-		shared_count = fobj->shared_count;
-	else
-		shared_count = 0;
-	fence_excl = dma_resv_excl_fence(resv);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
+	if (events & EPOLLOUT) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
-
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
+		if (dcb->active)
+			events &= ~EPOLLOUT;
+		else
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
+		if (events & EPOLLOUT) {
+			if (!dma_buf_poll_shared(resv, dcb) &&
+			    !dma_buf_poll_excl(resv, dcb))
+				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
+			else
+				events &= ~EPOLLOUT;
 		}
 	}
 
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
+	if (events & EPOLLIN) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 
-		/* Only queue a new callback if no event has fired yet */
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~EPOLLOUT;
+			events &= ~EPOLLIN;
 		else
-			dcb->active = EPOLLOUT;
+			dcb->active = EPOLLIN;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & EPOLLOUT))
-			goto out;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
+		if (events & EPOLLIN) {
+			if (!dma_buf_poll_excl(resv, dcb))
+				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
-			dma_fence_put(fence);
+			else
+				events &= ~EPOLLIN;
 		}
-
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
-			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-out:
-	rcu_read_unlock();
-
+	dma_resv_unlock(resv);
 	return events;
 }
@@ -566,8 +554,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
@@ -440,7 +440,7 @@ struct dma_buf {
 		wait_queue_head_t *poll;
 
 		__poll_t active;
-	} cb_excl, cb_shared;
+	} cb_in, cb_out;
 #ifdef CONFIG_DMABUF_SYSFS_STATS
 	/**
 	 * @sysfs_entry: