Commit 08e7e0ad authored by Lars-Peter Clausen, committed by Jonathan Cameron

iio: buffer: Allocate standard attributes in the core

All buffers want at least the length and the enable attribute. Move the
creation of those attributes to the core instead of having to do this in
each individual buffer implementation. This allows us to get rid of some
boilerplate code.
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
parent d967cb6b
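
In driver terms, this means a buffer implementation no longer declares its own "length" and "enable" device attributes or wraps them in an attribute group named "buffer"; it only hands any extra, driver-specific attributes to the core as a NULL-terminated array through iio_buffer.attrs, as the kfifo_buf and sca3000 hunks below show. A minimal sketch of the driver side after this patch (the attribute name, show callback and setup function are hypothetical, loosely modelled on the sca3000 change):

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

/* Hypothetical driver-specific buffer attribute. */
static ssize_t my_watermark_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "50\n");
}

static IIO_DEVICE_ATTR(watermark_50_percent, S_IRUGO,
		       my_watermark_show, NULL, 0);

/*
 * Only the extra attributes are listed here; "length" and "enable"
 * are now created by the IIO core for every buffer.
 */
static const struct attribute *my_buffer_attrs[] = {
	&iio_dev_attr_watermark_50_percent.dev_attr.attr,
	NULL,
};

static int my_driver_setup_buffer(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = iio_kfifo_allocate(indio_dev);

	if (!buffer)
		return -ENOMEM;

	buffer->attrs = my_buffer_attrs;	/* extras only */
	indio_dev->buffer = buffer;
	return 0;
}
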
@@ -383,9 +383,9 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
return ret;
}
ssize_t iio_buffer_read_length(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t iio_buffer_read_length(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
@@ -396,12 +396,10 @@ ssize_t iio_buffer_read_length(struct device *dev,
return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
static ssize_t iio_buffer_write_length(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
@@ -428,16 +426,14 @@ ssize_t iio_buffer_write_length(struct device *dev,
return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
@@ -724,10 +720,10 @@ int iio_update_buffers(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
static ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
bool requested_state;
@@ -759,10 +755,14 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_unlock(&indio_dev->mlock);
return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
static const char * const iio_scan_elements_group_name = "scan_elements";
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
iio_buffer_write_length);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
iio_buffer_show_enable, iio_buffer_store_enable);
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
struct iio_dev_attr *p;
@@ -774,6 +774,27 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (!buffer)
return 0;
attrcount = 0;
if (buffer->attrs) {
while (buffer->attrs[attrcount] != NULL)
attrcount++;
}
buffer->buffer_group.name = "buffer";
buffer->buffer_group.attrs = kcalloc(attrcount + 3,
sizeof(*buffer->buffer_group.attrs), GFP_KERNEL);
if (!buffer->buffer_group.attrs)
return -ENOMEM;
buffer->buffer_group.attrs[0] = &dev_attr_length.attr;
buffer->buffer_group.attrs[1] = &dev_attr_enable.attr;
if (buffer->attrs)
memcpy(&buffer->buffer_group.attrs[2], buffer->attrs,
sizeof(*&buffer->buffer_group.attrs) * attrcount);
buffer->buffer_group.attrs[attrcount+2] = NULL;
indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
if (buffer->scan_el_attrs != NULL) {
attr = buffer->scan_el_attrs->attrs;
while (*attr++ != NULL)
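
The array built above has a fixed layout: slot 0 is "length", slot 1 is "enable", the driver's extra attributes follow, and slot attrcount + 2 holds the NULL terminator, which is why kcalloc() sizes it to attrcount + 3 and the memcpy() copies exactly attrcount pointers. A standalone sketch of the same assembly, outside the kernel with a mocked-up struct attribute and hypothetical attribute names, purely to illustrate the indexing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace mock of struct attribute, just enough to show the layout. */
struct attribute { const char *name; };

static struct attribute attr_length = { "length" };
static struct attribute attr_enable = { "enable" };

/* Driver-supplied extras, NULL-terminated (hypothetical name). */
static struct attribute attr_watermark = { "watermark_50_percent" };
static const struct attribute *extra_attrs[] = { &attr_watermark, NULL };

int main(void)
{
	const struct attribute **extras = extra_attrs;
	struct attribute **group;
	int attrcount = 0, i;

	while (extras && extras[attrcount])
		attrcount++;

	/* Same sizing as the patch: two standard entries + extras + NULL. */
	group = calloc(attrcount + 3, sizeof(*group));
	if (!group)
		return 1;

	group[0] = &attr_length;
	group[1] = &attr_enable;
	if (extras)
		memcpy(&group[2], extras, sizeof(*group) * attrcount);
	group[attrcount + 2] = NULL;

	/* Prints buffer/length, buffer/enable, buffer/watermark_50_percent. */
	for (i = 0; group[i]; i++)
		printf("buffer/%s\n", group[i]->name);

	free(group);
	return 0;
}
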
@@ -838,6 +859,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
kfree(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
kfree(indio_dev->buffer->buffer_group.attrs);
return ret;
}
@@ -848,6 +870,7 @@ void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
return;
kfree(indio_dev->buffer->scan_mask);
kfree(indio_dev->buffer->buffer_group.attrs);
kfree(indio_dev->buffer->scan_el_group.attrs);
iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
@@ -52,20 +52,6 @@ static int iio_get_length_kfifo(struct iio_buffer *r)
return r->length;
}
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
static struct attribute *iio_kfifo_attributes[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
NULL,
};
static struct attribute_group iio_kfifo_attribute_group = {
.attrs = iio_kfifo_attributes,
.name = "buffer",
};
static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
struct iio_kfifo *kf = iio_to_kfifo(r);
@@ -169,7 +155,6 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
return NULL;
kf->update_needed = true;
iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
kf->buffer.access = &kfifo_access_funcs;
kf->buffer.length = 2;
mutex_init(&kf->user_lock);
@@ -140,9 +140,6 @@ static bool sca3000_ring_buf_data_available(struct iio_buffer *r)
return r->stufftoread;
}
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/**
* sca3000_query_ring_int() is the hardware ring status interrupt enabled
**/
@@ -232,20 +229,13 @@ static IIO_DEVICE_ATTR(in_accel_scale,
* only apply to the ring buffer. At all times full rate and accuracy
* is available via direct reading from registers.
*/
static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
static const struct attribute *sca3000_ring_attributes[] = {
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
&iio_dev_attr_in_accel_scale.dev_attr.attr,
NULL,
};
static struct attribute_group sca3000_ring_attr = {
.attrs = sca3000_ring_attributes,
.name = "buffer",
};
static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
struct iio_buffer *buf;
@@ -258,7 +248,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
ring->private = indio_dev;
buf = &ring->buf;
buf->stufftoread = 0;
buf->attrs = &sca3000_ring_attr;
buf->attrs = sca3000_ring_attributes;
iio_buffer_init(buf);
return buf;
@@ -83,10 +83,11 @@ struct iio_buffer {
bool scan_timestamp;
const struct iio_buffer_access_funcs *access;
struct list_head scan_el_dev_attr_list;
struct attribute_group buffer_group;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
const struct attribute_group *attrs;
const struct attribute **attrs;
struct list_head demux_list;
void *demux_bounce;
struct list_head buffer_list;
@@ -148,40 +149,6 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
int iio_update_demux(struct iio_dev *indio_dev);
/**
* iio_buffer_read_length() - attr func to get number of datums in the buffer
**/
ssize_t iio_buffer_read_length(struct device *dev,
struct device_attribute *attr,
char *buf);
/**
* iio_buffer_write_length() - attr func to set number of datums in the buffer
**/
ssize_t iio_buffer_write_length(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len);
/**
* iio_buffer_store_enable() - attr to turn the buffer on
**/
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len);
/**
* iio_buffer_show_enable() - attr to see if the buffer is on
**/
ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf);
#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
iio_buffer_read_length, \
iio_buffer_write_length)
#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
iio_buffer_show_enable, \
iio_buffer_store_enable)
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);