Commit e362dfbb authored by Jonathan Cameron's avatar Jonathan Cameron Committed by Greg Kroah-Hartman

staging:iio:adc:ad7887 move to irqchip based trigger handling.

Untested.
Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 13cfac20
...@@ -61,8 +61,6 @@ struct ad7887_state { ...@@ -61,8 +61,6 @@ struct ad7887_state {
struct spi_device *spi; struct spi_device *spi;
const struct ad7887_chip_info *chip_info; const struct ad7887_chip_info *chip_info;
struct regulator *reg; struct regulator *reg;
struct work_struct poll_work;
atomic_t protect_ring;
size_t d_size; size_t d_size;
u16 int_vref_mv; u16 int_vref_mv;
bool en_dual; bool en_dual;
......
...@@ -150,7 +150,6 @@ static int __devinit ad7887_probe(struct spi_device *spi) ...@@ -150,7 +150,6 @@ static int __devinit ad7887_probe(struct spi_device *spi)
spi_set_drvdata(spi, st); spi_set_drvdata(spi, st);
atomic_set(&st->protect_ring, 0);
st->spi = spi; st->spi = spi;
st->indio_dev = iio_allocate_device(0); st->indio_dev = iio_allocate_device(0);
......
...@@ -163,48 +163,27 @@ static int ad7887_ring_postdisable(struct iio_dev *indio_dev) ...@@ -163,48 +163,27 @@ static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
} }
/** /**
* ad7887_poll_func_th() th of trigger launched polling to ring buffer * ad7887_trigger_handler() bh of trigger launched polling to ring buffer
*
* As sampling only occurs on spi comms occurring, leave timestamping until
* then. Some triggers will generate their own time stamp. Currently
* there is no way of notifying them when no one cares.
**/
static void ad7887_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
struct ad7887_state *st = indio_dev->dev_data;
schedule_work(&st->poll_work);
return;
}
/**
* ad7887_poll_bh_to_ring() bh of trigger launched polling to ring buffer
* @work_s: the work struct through which this was scheduled
* *
* Currently there is no option in this driver to disable the saving of * Currently there is no option in this driver to disable the saving of
* timestamps within the ring. * timestamps within the ring.
* I think the one copy of this at a time was to avoid problems if the
* trigger was set far too high and the reads then locked up the computer.
**/ **/
static void ad7887_poll_bh_to_ring(struct work_struct *work_s) static irqreturn_t ad7887_trigger_handler(int irq, void *p)
{ {
struct ad7887_state *st = container_of(work_s, struct ad7887_state, struct iio_poll_func *pf = p;
poll_work); struct iio_dev *indio_dev = pf->private_data;
struct iio_dev *indio_dev = st->indio_dev; struct ad7887_state *st = iio_dev_get_devdata(indio_dev);
struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
struct iio_ring_buffer *ring = indio_dev->ring; struct iio_ring_buffer *ring = indio_dev->ring;
struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
s64 time_ns; s64 time_ns;
__u8 *buf; __u8 *buf;
int b_sent; int b_sent;
unsigned int bytes = ring->scan_count * st->chip_info->storagebits / 8; unsigned int bytes = ring->scan_count * st->chip_info->storagebits / 8;
/* Ensure only one copy of this function running at a time */
if (atomic_inc_return(&st->protect_ring) > 1)
return;
buf = kzalloc(st->d_size, GFP_KERNEL); buf = kzalloc(st->d_size, GFP_KERNEL);
if (buf == NULL) if (buf == NULL)
return; return -ENOMEM;
b_sent = spi_sync(st->spi, st->ring_msg); b_sent = spi_sync(st->spi, st->ring_msg);
if (b_sent) if (b_sent)
...@@ -215,17 +194,17 @@ static void ad7887_poll_bh_to_ring(struct work_struct *work_s) ...@@ -215,17 +194,17 @@ static void ad7887_poll_bh_to_ring(struct work_struct *work_s)
memcpy(buf, st->data, bytes); memcpy(buf, st->data, bytes);
if (ring->scan_timestamp) if (ring->scan_timestamp)
memcpy(buf + st->d_size - sizeof(s64), memcpy(buf + st->d_size - sizeof(s64),
&time_ns, sizeof(time_ns)); &time_ns, sizeof(time_ns));
indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns); indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns);
done: done:
kfree(buf); kfree(buf);
atomic_dec(&st->protect_ring);
return IRQ_HANDLED;
} }
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{ {
struct ad7887_state *st = indio_dev->dev_data;
int ret; int ret;
indio_dev->ring = iio_sw_rb_allocate(indio_dev); indio_dev->ring = iio_sw_rb_allocate(indio_dev);
...@@ -235,10 +214,22 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) ...@@ -235,10 +214,22 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
} }
/* Effectively select the ring buffer implementation */ /* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&indio_dev->ring->access); iio_ring_sw_register_funcs(&indio_dev->ring->access);
ret = iio_alloc_pollfunc(indio_dev, NULL, &ad7887_poll_func_th); indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
if (ret) if (indio_dev->pollfunc == NULL) {
ret = -ENOMEM;
goto error_deallocate_sw_rb; goto error_deallocate_sw_rb;
}
indio_dev->pollfunc->private_data = indio_dev;
indio_dev->pollfunc->h = &iio_pollfunc_store_time;
indio_dev->pollfunc->thread = &ad7887_trigger_handler;
indio_dev->pollfunc->type = IRQF_ONESHOT;
indio_dev->pollfunc->name =
kasprintf(GFP_KERNEL, "ad7887_consumer%d", indio_dev->id);
if (indio_dev->pollfunc->name == NULL) {
ret = -ENOMEM;
goto error_free_pollfunc;
}
/* Ring buffer functions - here trigger setup related */ /* Ring buffer functions - here trigger setup related */
indio_dev->ring->preenable = &ad7887_ring_preenable; indio_dev->ring->preenable = &ad7887_ring_preenable;
...@@ -248,11 +239,11 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev) ...@@ -248,11 +239,11 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
indio_dev->ring->scan_el_attrs = &ad7887_scan_el_group; indio_dev->ring->scan_el_attrs = &ad7887_scan_el_group;
indio_dev->ring->scan_timestamp = true; indio_dev->ring->scan_timestamp = true;
INIT_WORK(&st->poll_work, &ad7887_poll_bh_to_ring);
/* Flag that polled ring buffering is possible */ /* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_RING_TRIGGERED; indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0; return 0;
error_free_pollfunc:
kfree(indio_dev->pollfunc);
error_deallocate_sw_rb: error_deallocate_sw_rb:
iio_sw_rb_free(indio_dev->ring); iio_sw_rb_free(indio_dev->ring);
error_ret: error_ret:
...@@ -267,6 +258,7 @@ void ad7887_ring_cleanup(struct iio_dev *indio_dev) ...@@ -267,6 +258,7 @@ void ad7887_ring_cleanup(struct iio_dev *indio_dev)
iio_trigger_dettach_poll_func(indio_dev->trig, iio_trigger_dettach_poll_func(indio_dev->trig,
indio_dev->pollfunc); indio_dev->pollfunc);
} }
kfree(indio_dev->pollfunc->name);
kfree(indio_dev->pollfunc); kfree(indio_dev->pollfunc);
iio_sw_rb_free(indio_dev->ring); iio_sw_rb_free(indio_dev->ring);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment