Commit 37d34556 authored by Josselin Costanzi's avatar Josselin Costanzi Committed by Jonathan Cameron

iio: add watermark logic to iio read and poll

Currently the IIO buffer blocking read only waits until at least one
data element is available.
This patch makes the reader sleep until enough data is collected before
returning to userspace. This should limit the read() calls count when
trying to get data in batches.

Co-author: Yannick Bedhomme <yannick.bedhomme@mobile-devices.fr>
Signed-off-by: Josselin Costanzi <josselin.costanzi@mobile-devices.fr>
[rebased and remove buffer timeout]
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
parent 9444a300
...@@ -1280,3 +1280,18 @@ Contact: linux-iio@vger.kernel.org ...@@ -1280,3 +1280,18 @@ Contact: linux-iio@vger.kernel.org
Description: Description:
Specifies number of seconds in which we compute the steps Specifies number of seconds in which we compute the steps
that occur in order to decide if the consumer is making steps. that occur in order to decide if the consumer is making steps.
What: /sys/bus/iio/devices/iio:deviceX/buffer/watermark
KernelVersion: 4.2
Contact: linux-iio@vger.kernel.org
Description:
A single positive integer specifying the maximum number of scan
elements to wait for.
Poll will block until the watermark is reached.
Blocking read will wait until the minimum of the requested
read amount and the watermark is available.
Non-blocking read will retrieve the available samples from the
buffer even if there are fewer samples than the watermark level. This
allows the application to block on poll with a timeout and read
the available samples after the timeout expires and thus have a
maximum delay guarantee.
...@@ -37,11 +37,28 @@ static bool iio_buffer_is_active(struct iio_buffer *buf) ...@@ -37,11 +37,28 @@ static bool iio_buffer_is_active(struct iio_buffer *buf)
return !list_empty(&buf->buffer_list); return !list_empty(&buf->buffer_list);
} }
static bool iio_buffer_data_available(struct iio_buffer *buf) static size_t iio_buffer_data_available(struct iio_buffer *buf)
{ {
return buf->access->data_available(buf); return buf->access->data_available(buf);
} }
/*
 * iio_buffer_ready() - decide whether a read()/poll() waiter should wake.
 * @indio_dev:	device the buffer belongs to
 * @buf:	buffer being waited on
 * @to_wait:	number of data elements the waiter wants available
 *
 * Returns true when the waiter should stop sleeping: the device has been
 * unregistered, or at least @to_wait elements are available (a disabled
 * buffer is only drained, never refilled, so a single element suffices).
 */
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait)
{
	size_t avail;

	/* Always wake waiters once the device has been unregistered. */
	if (indio_dev->info == NULL)
		return true;

	/*
	 * When the buffer is no longer active nothing new will arrive;
	 * don't demand more than one element so the remainder can drain.
	 */
	if (!iio_buffer_is_active(buf) && to_wait > 1)
		to_wait = 1;

	avail = iio_buffer_data_available(buf);

	return avail >= to_wait;
}
/** /**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access * iio_buffer_read_first_n_outer() - chrdev read for buffer access
* *
...@@ -53,6 +70,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, ...@@ -53,6 +70,8 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{ {
struct iio_dev *indio_dev = filp->private_data; struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer; struct iio_buffer *rb = indio_dev->buffer;
size_t datum_size;
size_t to_wait = 0;
int ret; int ret;
if (!indio_dev->info) if (!indio_dev->info)
...@@ -61,19 +80,26 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, ...@@ -61,19 +80,26 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
if (!rb || !rb->access->read_first_n) if (!rb || !rb->access->read_first_n)
return -EINVAL; return -EINVAL;
datum_size = rb->bytes_per_datum;
/*
* If datum_size is 0 there will never be anything to read from the
* buffer, so signal end of file now.
*/
if (!datum_size)
return 0;
if (!(filp->f_flags & O_NONBLOCK))
to_wait = min_t(size_t, n / datum_size, rb->watermark);
do { do {
if (!iio_buffer_data_available(rb)) { ret = wait_event_interruptible(rb->pollq,
if (filp->f_flags & O_NONBLOCK) iio_buffer_ready(indio_dev, rb, to_wait));
return -EAGAIN; if (ret)
return ret;
ret = wait_event_interruptible(rb->pollq, if (!indio_dev->info)
iio_buffer_data_available(rb) || return -ENODEV;
indio_dev->info == NULL);
if (ret)
return ret;
if (indio_dev->info == NULL)
return -ENODEV;
}
ret = rb->access->read_first_n(rb, n, buf); ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK)) if (ret == 0 && (filp->f_flags & O_NONBLOCK))
...@@ -96,9 +122,8 @@ unsigned int iio_buffer_poll(struct file *filp, ...@@ -96,9 +122,8 @@ unsigned int iio_buffer_poll(struct file *filp,
return -ENODEV; return -ENODEV;
poll_wait(filp, &rb->pollq, wait); poll_wait(filp, &rb->pollq, wait);
if (iio_buffer_data_available(rb)) if (iio_buffer_ready(indio_dev, rb, rb->watermark))
return POLLIN | POLLRDNORM; return POLLIN | POLLRDNORM;
/* need a way of knowing if there may be enough data... */
return 0; return 0;
} }
...@@ -123,6 +148,7 @@ void iio_buffer_init(struct iio_buffer *buffer) ...@@ -123,6 +148,7 @@ void iio_buffer_init(struct iio_buffer *buffer)
INIT_LIST_HEAD(&buffer->buffer_list); INIT_LIST_HEAD(&buffer->buffer_list);
init_waitqueue_head(&buffer->pollq); init_waitqueue_head(&buffer->pollq);
kref_init(&buffer->ref); kref_init(&buffer->ref);
buffer->watermark = 1;
} }
EXPORT_SYMBOL(iio_buffer_init); EXPORT_SYMBOL(iio_buffer_init);
...@@ -416,6 +442,11 @@ static ssize_t iio_buffer_write_length(struct device *dev, ...@@ -416,6 +442,11 @@ static ssize_t iio_buffer_write_length(struct device *dev,
buffer->access->set_length(buffer, val); buffer->access->set_length(buffer, val);
ret = 0; ret = 0;
} }
if (ret)
goto out;
if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length;
out:
mutex_unlock(&indio_dev->mlock); mutex_unlock(&indio_dev->mlock);
return ret ? ret : len; return ret ? ret : len;
...@@ -472,6 +503,7 @@ static void iio_buffer_activate(struct iio_dev *indio_dev, ...@@ -472,6 +503,7 @@ static void iio_buffer_activate(struct iio_dev *indio_dev,
static void iio_buffer_deactivate(struct iio_buffer *buffer) static void iio_buffer_deactivate(struct iio_buffer *buffer)
{ {
list_del_init(&buffer->buffer_list); list_del_init(&buffer->buffer_list);
wake_up_interruptible(&buffer->pollq);
iio_buffer_put(buffer); iio_buffer_put(buffer);
} }
...@@ -754,16 +786,64 @@ static ssize_t iio_buffer_store_enable(struct device *dev, ...@@ -754,16 +786,64 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
static const char * const iio_scan_elements_group_name = "scan_elements"; static const char * const iio_scan_elements_group_name = "scan_elements";
/* sysfs read for buffer/watermark: print the current watermark level. */
static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%u\n", indio_dev->buffer->watermark);
}
/*
 * sysfs write for buffer/watermark: set the number of data elements to
 * collect before waking poll()/read() waiters.
 *
 * Rejects zero, values larger than the buffer length, and any change
 * while the buffer is active. Returns @len on success, negative errno
 * on failure.
 */
static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret = kstrtouint(buf, 10, &val);

	if (ret)
		return ret;

	/* A watermark of zero would never wake anyone. */
	if (val == 0)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
	} else if (iio_buffer_is_active(buffer)) {
		/* Changing the watermark mid-capture is not supported. */
		ret = -EBUSY;
	} else {
		buffer->watermark = val;
	}

	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length, static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
iio_buffer_write_length); iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length, static struct device_attribute dev_attr_length_ro = __ATTR(length,
S_IRUGO, iio_buffer_read_length, NULL); S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
iio_buffer_show_enable, iio_buffer_store_enable); iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct attribute *iio_buffer_attrs[] = { static struct attribute *iio_buffer_attrs[] = {
&dev_attr_length.attr, &dev_attr_length.attr,
&dev_attr_enable.attr, &dev_attr_enable.attr,
&dev_attr_watermark.attr,
}; };
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev) int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
...@@ -944,8 +1024,18 @@ static const void *iio_demux(struct iio_buffer *buffer, ...@@ -944,8 +1024,18 @@ static const void *iio_demux(struct iio_buffer *buffer,
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{ {
const void *dataout = iio_demux(buffer, data); const void *dataout = iio_demux(buffer, data);
int ret;
ret = buffer->access->store_to(buffer, dataout);
if (ret)
return ret;
return buffer->access->store_to(buffer, dataout); /*
* We can't just test for watermark to decide if we wake the poll queue
* because read may request less samples than the watermark.
*/
wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
return 0;
} }
static void iio_buffer_demux_free(struct iio_buffer *buffer) static void iio_buffer_demux_free(struct iio_buffer *buffer)
......
...@@ -83,9 +83,6 @@ static int iio_store_to_kfifo(struct iio_buffer *r, ...@@ -83,9 +83,6 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
ret = kfifo_in(&kf->kf, data, 1); ret = kfifo_in(&kf->kf, data, 1);
if (ret != 1) if (ret != 1)
return -EBUSY; return -EBUSY;
wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
return 0; return 0;
} }
...@@ -109,16 +106,16 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r, ...@@ -109,16 +106,16 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
return copied; return copied;
} }
static bool iio_kfifo_buf_data_available(struct iio_buffer *r) static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
{ {
struct iio_kfifo *kf = iio_to_kfifo(r); struct iio_kfifo *kf = iio_to_kfifo(r);
bool empty; size_t samples;
mutex_lock(&kf->user_lock); mutex_lock(&kf->user_lock);
empty = kfifo_is_empty(&kf->kf); samples = kfifo_len(&kf->kf);
mutex_unlock(&kf->user_lock); mutex_unlock(&kf->user_lock);
return !empty; return samples;
} }
static void iio_kfifo_buffer_release(struct iio_buffer *buffer) static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
......
...@@ -129,9 +129,9 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r, ...@@ -129,9 +129,9 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
return ret ? ret : num_read; return ret ? ret : num_read;
} }
static bool sca3000_ring_buf_data_available(struct iio_buffer *r) static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
{ {
return r->stufftoread; return r->stufftoread ? r->watermark : 0;
} }
/** /**
......
...@@ -21,8 +21,8 @@ struct iio_buffer; ...@@ -21,8 +21,8 @@ struct iio_buffer;
* struct iio_buffer_access_funcs - access functions for buffers. * struct iio_buffer_access_funcs - access functions for buffers.
* @store_to: actually store stuff to the buffer * @store_to: actually store stuff to the buffer
* @read_first_n: try to get a specified number of bytes (must exist) * @read_first_n: try to get a specified number of bytes (must exist)
* @data_available: indicates whether data for reading from the buffer is * @data_available: indicates how much data is available for reading from
* available. * the buffer.
* @request_update: if a parameter change has been marked, update underlying * @request_update: if a parameter change has been marked, update underlying
* storage. * storage.
* @set_bytes_per_datum:set number of bytes per datum * @set_bytes_per_datum:set number of bytes per datum
...@@ -43,7 +43,7 @@ struct iio_buffer_access_funcs { ...@@ -43,7 +43,7 @@ struct iio_buffer_access_funcs {
int (*read_first_n)(struct iio_buffer *buffer, int (*read_first_n)(struct iio_buffer *buffer,
size_t n, size_t n,
char __user *buf); char __user *buf);
bool (*data_available)(struct iio_buffer *buffer); size_t (*data_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer); int (*request_update)(struct iio_buffer *buffer);
...@@ -72,6 +72,7 @@ struct iio_buffer_access_funcs { ...@@ -72,6 +72,7 @@ struct iio_buffer_access_funcs {
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan. * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
* @buffer_list: [INTERN] entry in the devices list of current buffers. * @buffer_list: [INTERN] entry in the devices list of current buffers.
* @ref: [INTERN] reference count of the buffer. * @ref: [INTERN] reference count of the buffer.
* @watermark: [INTERN] number of datums to wait for poll/read.
*/ */
struct iio_buffer { struct iio_buffer {
int length; int length;
...@@ -90,6 +91,7 @@ struct iio_buffer { ...@@ -90,6 +91,7 @@ struct iio_buffer {
void *demux_bounce; void *demux_bounce;
struct list_head buffer_list; struct list_head buffer_list;
struct kref ref; struct kref ref;
unsigned int watermark;
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment