Commit eaa7def0 authored by Sakari Ailus, committed by Greg Kroah-Hartman

media: v4l: event: Add subscription to list before calling "add" operation

commit 92539d3e upstream.

Patch ad608fbc changed how events were subscribed to address an issue
elsewhere. As a side effect of that change, the "add" callback was called
before the event subscription was added to the list of subscribed events,
causing the first event queued by the add callback (and possibly other
events arriving soon afterwards) to be lost.

Fix this by adding the subscription to the list before calling the "add"
callback, and by cleaning up afterwards if that callback fails.
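
The ordering matters because a driver's "add" callback may queue an initial
event immediately; if the subscription is not yet on the subscribed list, the
event-queueing code finds no matching subscriber and the event is silently
dropped. The following is a minimal, self-contained userspace sketch of that
ordering issue (all toy_* names are hypothetical and only model the logic; this
is not the kernel code):

	/*
	 * Minimal userspace model of the subscribe/"add"-callback ordering bug.
	 * Only the ordering logic mirrors the fix; everything else is made up.
	 */
	#include <stdio.h>

	#define MAX_SUBS 8

	struct toy_sub {
		unsigned int type;
		int pending;		/* events queued for this subscription */
	};

	static struct toy_sub *subs[MAX_SUBS];
	static int nsubs;

	/* Deliver an event only if a matching subscription is already listed. */
	static void toy_queue_event(unsigned int type)
	{
		for (int i = 0; i < nsubs; i++) {
			if (subs[i]->type == type) {
				subs[i]->pending++;
				return;
			}
		}
		printf("event type %u dropped: no subscription on the list\n", type);
	}

	/* "add" callback: queues an initial event, as some drivers do. */
	static void toy_add_callback(struct toy_sub *sev)
	{
		toy_queue_event(sev->type);
	}

	static void toy_subscribe(struct toy_sub *sev, int list_first)
	{
		if (list_first)			/* fixed ordering */
			subs[nsubs++] = sev;

		toy_add_callback(sev);

		if (!list_first)		/* buggy ordering */
			subs[nsubs++] = sev;
	}

	int main(void)
	{
		struct toy_sub a = { .type = 1 }, b = { .type = 2 };

		toy_subscribe(&a, 0);	/* callback runs before listing: event lost */
		toy_subscribe(&b, 1);	/* listed first, as in this fix: event kept */

		printf("a.pending = %d, b.pending = %d\n", a.pending, b.pending);
		return 0;
	}

Running the sketch prints a.pending = 0 and b.pending = 1, showing that only
the list-first ordering keeps the event queued by the "add" callback.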

Fixes: ad608fbc ("media: v4l: event: Prevent freeing event subscriptions while accessed")
Reported-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Tested-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Tested-by: Hans Verkuil <hans.verkuil@cisco.com>
Cc: stable@vger.kernel.org (for 4.14 and up)
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 88723e6e
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
 }
 EXPORT_SYMBOL_GPL(v4l2_event_pending);
 
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+	struct v4l2_fh *fh = sev->fh;
+	unsigned int i;
+
+	lockdep_assert_held(&fh->subscribe_lock);
+	assert_spin_locked(&fh->vdev->fh_lock);
+
+	/* Remove any pending events for this subscription */
+	for (i = 0; i < sev->in_use; i++) {
+		list_del(&sev->events[sev_pos(sev, i)].list);
+		fh->navailable--;
+	}
+	list_del(&sev->list);
+}
+
 int v4l2_event_subscribe(struct v4l2_fh *fh,
 			 const struct v4l2_event_subscription *sub, unsigned elems,
 			 const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+	if (!found_ev)
+		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
 		/* Already listening */
 		kvfree(sev);
-		goto out_unlock;
-	}
-
-	if (sev->ops && sev->ops->add) {
+	} else if (sev->ops && sev->ops->add) {
 		ret = sev->ops->add(sev, elems);
 		if (ret) {
+			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+			__v4l2_event_unsubscribe(sev);
+			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 			kvfree(sev);
-			goto out_unlock;
 		}
 	}
 
-	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-	list_add(&sev->list, &fh->subscribed);
-	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
-
-out_unlock:
 	mutex_unlock(&fh->subscribe_lock);
 
 	return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 {
 	struct v4l2_subscribed_event *sev;
 	unsigned long flags;
-	int i;
 
 	if (sub->type == V4L2_EVENT_ALL) {
 		v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (sev != NULL) {
-		/* Remove any pending events for this subscription */
-		for (i = 0; i < sev->in_use; i++) {
-			list_del(&sev->events[sev_pos(sev, i)].list);
-			fh->navailable--;
-		}
-		list_del(&sev->list);
-	}
+	if (sev != NULL)
+		__v4l2_event_unsubscribe(sev);
 
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);