Commit 43ba1100 authored by Lars-Peter Clausen, committed by Greg Kroah-Hartman

staging:iio:events: Use waitqueue lock to protect event queue

Use the waitqueue lock to protect the event queue instead of a custom mutex.
This has the advantage that we can call the waitqueue operations with the lock
held, which simplifies the code flow a bit.
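To illustrate the pattern (a minimal sketch only; the struct and function names below are hypothetical and not part of this driver), the waitqueue's internal spinlock can protect both the queued data and the sleep/wake handshake:

    #include <linux/wait.h>
    #include <linux/spinlock.h>

    /* Hypothetical example: wait.lock doubles as the lock for 'ready'. */
    struct my_events {
            wait_queue_head_t wait;
            bool ready;
    };

    static void my_push(struct my_events *ev)
    {
            spin_lock(&ev->wait.lock);
            ev->ready = true;
            /* We already hold wait.lock, so use the _locked wake-up variant. */
            wake_up_locked(&ev->wait);
            spin_unlock(&ev->wait.lock);
    }

    static int my_read(struct my_events *ev)
    {
            int ret;

            spin_lock(&ev->wait.lock);
            /* Drops and re-acquires wait.lock only while actually sleeping. */
            ret = wait_event_interruptible_locked(ev->wait, ev->ready);
            if (ret == 0)
                    ev->ready = false;
            spin_unlock(&ev->wait.lock);
            return ret;
    }

Because the producer already holds wait.lock, it can call wake_up_locked() instead of wake_up_interruptible(), and the consumer can use wait_event_interruptible_locked(), which releases the lock only while it sleeps. This removes the unlock/relock steps that the separate mutex forced, which is the simplification referred to above.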
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Jonathan Cameron <jic23@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2c00193f
@@ -35,7 +35,6 @@
  */
 struct iio_event_interface {
         wait_queue_head_t       wait;
-        struct mutex            event_list_lock;
         DECLARE_KFIFO(det_events, struct iio_event_data, 16);
 
         struct list_head        dev_attr_list;
@@ -50,19 +49,17 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
         int copied;
 
         /* Does anyone care? */
-        mutex_lock(&ev_int->event_list_lock);
+        spin_lock(&ev_int->wait.lock);
         if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 
                 ev.id = ev_code;
                 ev.timestamp = timestamp;
 
                 copied = kfifo_put(&ev_int->det_events, &ev);
-                mutex_unlock(&ev_int->event_list_lock);
                 if (copied != 0)
-                        wake_up_interruptible(&ev_int->wait);
-        } else
-                mutex_unlock(&ev_int->event_list_lock);
+                        wake_up_locked(&ev_int->wait);
+        }
+        spin_unlock(&ev_int->wait.lock);
 
         return 0;
 }
@@ -80,28 +77,25 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
         if (count < sizeof(struct iio_event_data))
                 return -EINVAL;
 
-        mutex_lock(&ev_int->event_list_lock);
+        spin_lock(&ev_int->wait.lock);
         if (kfifo_is_empty(&ev_int->det_events)) {
                 if (filep->f_flags & O_NONBLOCK) {
                         ret = -EAGAIN;
-                        goto error_mutex_unlock;
+                        goto error_unlock;
                 }
-                mutex_unlock(&ev_int->event_list_lock);
                 /* Blocking on device; waiting for something to be there */
-                ret = wait_event_interruptible(ev_int->wait,
+                ret = wait_event_interruptible_locked(ev_int->wait,
                                         !kfifo_is_empty(&ev_int->det_events));
                 if (ret)
-                        goto error_ret;
+                        goto error_unlock;
                 /* Single access device so no one else can get the data */
-                mutex_lock(&ev_int->event_list_lock);
         }
-        mutex_unlock(&ev_int->event_list_lock);
 
         ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
 
-error_mutex_unlock:
-        mutex_unlock(&ev_int->event_list_lock);
-error_ret:
+error_unlock:
+        spin_unlock(&ev_int->wait.lock);
 
         return ret ? ret : copied;
 }
@@ -109,7 +103,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 {
         struct iio_event_interface *ev_int = filep->private_data;
 
-        mutex_lock(&ev_int->event_list_lock);
+        spin_lock(&ev_int->wait.lock);
         clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
         /*
          * In order to maintain a clean state for reopening,
@@ -117,7 +111,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
          * any new __iio_push_event calls running.
          */
         kfifo_reset_out(&ev_int->det_events);
-        mutex_unlock(&ev_int->event_list_lock);
+        spin_unlock(&ev_int->wait.lock);
 
         return 0;
 }
@@ -137,18 +131,18 @@ int iio_event_getfd(struct iio_dev *indio_dev)
         if (ev_int == NULL)
                 return -ENODEV;
 
-        mutex_lock(&ev_int->event_list_lock);
+        spin_lock(&ev_int->wait.lock);
         if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
-                mutex_unlock(&ev_int->event_list_lock);
+                spin_unlock(&ev_int->wait.lock);
                 return -EBUSY;
         }
-        mutex_unlock(&ev_int->event_list_lock);
+        spin_unlock(&ev_int->wait.lock);
         fd = anon_inode_getfd("iio:event",
                                 &iio_event_chrdev_fileops, ev_int, O_RDONLY);
         if (fd < 0) {
-                mutex_lock(&ev_int->event_list_lock);
+                spin_lock(&ev_int->wait.lock);
                 clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-                mutex_unlock(&ev_int->event_list_lock);
+                spin_unlock(&ev_int->wait.lock);
         }
         return fd;
 }
@@ -360,7 +354,6 @@ static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
 
 static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 {
-        mutex_init(&ev_int->event_list_lock);
         INIT_KFIFO(ev_int->det_events);
         init_waitqueue_head(&ev_int->wait);
 }