Release 4.7: drivers/staging/iio/accel/lis3l02dq_ring.c
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include "lis3l02dq.h"
/**
* combine_8_to_16() - utility function to combine two u8s into a u16
**/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
u16 _lower = lower;
u16 _upper = upper;
return _lower | (_upper << 8);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 32 | 100.00% | 1 | 100.00% |
| Total | 32 | 100.00% | 1 | 100.00% |
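As a quick worked example (values chosen arbitrarily, not taken from the driver), the first argument supplies bits 0-7 and the second bits 8-15:

u16 val = combine_8_to_16(0x34, 0x12);	/* 0x34 | (0x12 << 8) == 0x1234 */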
/**
* lis3l02dq_data_rdy_trig_poll() - IRQ handler for the data-ready trigger
**/
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct lis3l02dq_state *st = iio_priv(indio_dev);
if (st->trigger_on) {
iio_trigger_poll(st->trig);
return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 50 | 100.00% | 2 | 100.00% |
| Total | 50 | 100.00% | 2 | 100.00% |
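This handler returns IRQ_WAKE_THREAD when the trigger is not in use, so it is expected to be registered as the hard half of a threaded interrupt from the core driver (lis3l02dq_core.c). The sketch below shows the general shape only; the threaded handler name and the IRQ flags are assumptions, not quotes from the core file.

/* Sketch only: hypothetical registration from the core driver. */
ret = request_threaded_irq(st->us->irq,
			   &lis3l02dq_data_rdy_trig_poll,
			   &lis3l02dq_event_handler,	/* assumed threaded handler */
			   IRQF_TRIGGER_RISING,
			   "lis3l02dq",
			   indio_dev);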
static const u8 read_all_tx_array[] = {
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
/**
* lis3l02dq_read_all() - read all currently selected channels
* @indio_dev: IIO device state
* @rx_array: (DMA-capable) receive array; must be at least
* 4 * (number of channels) bytes long
**/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer *xfers;
struct spi_message msg;
int ret, i, j = 0;
xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength) * 2,
sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array) / 4; i++)
if (test_bit(i, indio_dev->active_scan_mask)) {
/* lower byte */
xfers[j].tx_buf = st->tx + (2 * j);
st->tx[2 * j] = read_all_tx_array[i * 4];
st->tx[2 * j + 1] = 0;
if (rx_array)
xfers[j].rx_buf = rx_array + (j * 2);
xfers[j].bits_per_word = 8;
xfers[j].len = 2;
xfers[j].cs_change = 1;
j++;
/* upper byte */
xfers[j].tx_buf = st->tx + (2 * j);
st->tx[2 * j] = read_all_tx_array[i * 4 + 2];
st->tx[2 * j + 1] = 0;
if (rx_array)
xfers[j].rx_buf = rx_array + (j * 2);
xfers[j].bits_per_word = 8;
xfers[j].len = 2;
xfers[j].cs_change = 1;
j++;
}
/* After these are transmitted, rx_array should hold the register
* values in alternate bytes.
*/
spi_message_init(&msg);
for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength) * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
mutex_unlock(&st->buf_lock);
kfree(xfers);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 371 | 97.12% | 4 | 57.14% |
| ioana ciornei | 8 | 2.09% | 1 | 14.29% |
| thomas meyer | 2 | 0.52% | 1 | 14.29% |
| greg kroah-hartman | 1 | 0.26% | 1 | 14.29% |
| Total | 382 | 100.00% | 7 | 100.00% |
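Each enabled channel produces two 2-byte transfers, and the register value is clocked back while the second (zero) byte of each transfer goes out, so only the odd positions of rx_array carry data. A sketch of the resulting layout for a full X/Y/Z scan, and how one axis would be decoded (the next function does this for every enabled channel):

/*
 * rx_array[1] = OUT_X_L	rx_array[3]  = OUT_X_H
 * rx_array[5] = OUT_Y_L	rx_array[7]  = OUT_Y_H
 * rx_array[9] = OUT_Z_L	rx_array[11] = OUT_Z_H
 * (even positions carry no data)
 */
s16 x = (s16)combine_8_to_16(rx_array[1], rx_array[3]);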
static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
u8 *buf)
{
int ret, i;
u8 *rx_array;
s16 *data = (s16 *)buf;
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
rx_array = kcalloc(4, scan_count, GFP_KERNEL);
if (!rx_array)
return -ENOMEM;
ret = lis3l02dq_read_all(indio_dev, rx_array);
if (ret < 0) {
kfree(rx_array);
return ret;
}
for (i = 0; i < scan_count; i++)
data[i] = combine_8_to_16(rx_array[i * 4 + 1],
rx_array[i * 4 + 3]);
kfree(rx_array);
return i * sizeof(data[0]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 138 | 93.24% | 5 | 62.50% |
| peter meerwald | 7 | 4.73% | 1 | 12.50% |
| navya sri nizamkari | 2 | 1.35% | 1 | 12.50% |
| cristina opriceana | 1 | 0.68% | 1 | 12.50% |
| Total | 148 | 100.00% | 8 | 100.00% |
static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
int len = 0;
char *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
kfree(data);
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 93 | 86.11% | 6 | 60.00% |
| lars-peter clausen | 13 | 12.04% | 2 | 20.00% |
| cristina opriceana | 1 | 0.93% | 1 | 10.00% |
| peter meerwald | 1 | 0.93% | 1 | 10.00% |
| Total | 108 | 100.00% | 10 | 100.00% |
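iio_push_to_buffers_with_timestamp() places the 64-bit timestamp in the last naturally aligned 8 bytes of the scan_bytes-sized buffer before pushing it. As an illustrative sketch (assuming all three axes plus the timestamp are enabled, giving scan_bytes == 16), the pushed sample looks like:

/*
 * bytes  0-1 : X (s16)
 * bytes  2-3 : Y (s16)
 * bytes  4-5 : Z (s16)
 * bytes  6-7 : padding so the timestamp is 8-byte aligned
 * bytes  8-15: s64 timestamp (pf->timestamp)
 */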
/* Caller responsible for locking as necessary. */
static int
__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
{
int ret;
u8 valold;
bool currentlyset;
struct lis3l02dq_state *st = iio_priv(indio_dev);
/* Get the current event mask register */
ret = lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_2_ADDR,
&valold);
if (ret)
goto error_ret;
/* Find out if data ready is already on */
currentlyset
= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
/* Disable requested */
if (!state && currentlyset) {
/* Disable the data ready signal */
valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
/* The double write is to overcome a hardware bug? */
ret = lis3l02dq_spi_write_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_2_ADDR,
valold);
if (ret)
goto error_ret;
ret = lis3l02dq_spi_write_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_2_ADDR,
valold);
if (ret)
goto error_ret;
st->trigger_on = false;
/* Enable requested */
} else if (state && !currentlyset) {
/* Not currently set, so enable as requested;
* first disable all events.
*/
ret = lis3l02dq_disable_all_events(indio_dev);
if (ret < 0)
goto error_ret;
valold = ret |
LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
st->trigger_on = true;
ret = lis3l02dq_spi_write_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_2_ADDR,
valold);
if (ret)
goto error_ret;
}
return 0;
error_ret:
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 180 | 97.30% | 5 | 62.50% |
| lars-peter clausen | 2 | 1.08% | 1 | 12.50% |
| peter meerwald | 2 | 1.08% | 1 | 12.50% |
| cristina moraru | 1 | 0.54% | 1 | 12.50% |
| Total | 185 | 100.00% | 8 | 100.00% |
/**
* lis3l02dq_data_rdy_trigger_set_state() - set the data-ready interrupt state
*
* When disabling the interrupt, also do a final read to ensure it is clear.
* This is only important in some cases where the scan enable elements are
* switched before the buffer is re-enabled.
**/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
int ret = 0;
u8 t;
__lis3l02dq_write_data_ready_config(indio_dev, state);
if (!state) {
/*
* A possible quirk with the handler is currently worked around
* by ensuring outstanding read events are cleared.
*/
ret = lis3l02dq_read_all(indio_dev, NULL);
}
lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
&t);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 64 | 92.75% | 2 | 40.00% |
| lars-peter clausen | 4 | 5.80% | 2 | 40.00% |
| peter meerwald | 1 | 1.45% | 1 | 20.00% |
| Total | 69 | 100.00% | 5 | 100.00% |
/**
* lis3l02dq_trig_try_reen() - try re-enabling the IRQ for the data-ready trigger
* @trig: the data-ready trigger
*/
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct lis3l02dq_state *st = iio_priv(indio_dev);
int i;
/* If the GPIO is still high (or high again), we may in theory
* need to do this several times.
*/
for (i = 0; i < 5; i++)
if (gpio_get_value(st->gpio))
lis3l02dq_read_all(indio_dev, NULL);
else
break;
if (i == 5)
pr_info("Failed to clear the interrupt for lis3l02dq\n");
/* irq reenabled so success! */
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 75 | 92.59% | 3 | 42.86% |
| lars-peter clausen | 3 | 3.70% | 1 | 14.29% |
| cristina moraru | 1 | 1.23% | 1 | 14.29% |
| arnd bergmann | 1 | 1.23% | 1 | 14.29% |
| ebru akagunduz | 1 | 1.23% | 1 | 14.29% |
| Total | 81 | 100.00% | 7 | 100.00% |
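Returning 0 here simply reports success; it is the IIO core, in iio_trigger_notify_done(), that decides whether to poll the trigger again, and it only does so when try_reenable() returns non-zero. A paraphrased sketch of that core-side check (see the 4.7-era trigger code for the real version):

/* Paraphrased core behaviour, not part of this driver. */
if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
    trig->ops->try_reenable)
	if (trig->ops->try_reenable(trig))
		iio_trigger_poll(trig);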
static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
.try_reenable = &lis3l02dq_trig_try_reen,
};
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
struct lis3l02dq_state *st = iio_priv(indio_dev);
st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
if (!st->trig) {
ret = -ENOMEM;
goto error_ret;
}
st->trig->dev.parent = &st->us->dev;
st->trig->ops = &lis3l02dq_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
ret = iio_trigger_register(st->trig);
if (ret)
goto error_free_trig;
return 0;
error_free_trig:
iio_trigger_free(st->trig);
error_ret:
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 111 | 93.28% | 7 | 70.00% |
| lars-peter clausen | 6 | 5.04% | 2 | 20.00% |
| dan carpenter | 2 | 1.68% | 1 | 10.00% |
| Total | 119 | 100.00% | 10 | 100.00% |
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
struct lis3l02dq_state *st = iio_priv(indio_dev);
iio_trigger_unregister(st->trig);
iio_trigger_free(st->trig);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 33 | 97.06% | 3 | 75.00% |
| lars-peter clausen | 1 | 2.94% | 1 | 25.00% |
| Total | 34 | 100.00% | 4 | 100.00% |
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
iio_dealloc_pollfunc(indio_dev->pollfunc);
iio_kfifo_free(indio_dev->buffer);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 24 | 100.00% | 6 | 100.00% |
| Total | 24 | 100.00% | 6 | 100.00% |
static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
/* Disable unwanted channels otherwise the interrupt will not clear */
u8 t;
int ret;
bool oneenabled = false;
ret = lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
&t);
if (ret)
goto error_ret;
if (test_bit(0, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
oneenabled = true;
} else {
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
}
if (test_bit(1, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
oneenabled = true;
} else {
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
}
if (test_bit(2, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
oneenabled = true;
} else {
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
}
if (!oneenabled) /* what happens in this case is unknown */
return -EINVAL;
ret = lis3l02dq_spi_write_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
t);
if (ret)
goto error_ret;
return iio_triggered_buffer_postenable(indio_dev);
error_ret:
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 162 | 96.43% | 5 | 83.33% |
| haneen mohammed | 6 | 3.57% | 1 | 16.67% |
| Total | 168 | 100.00% | 6 | 100.00% |
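Bits 0, 1 and 2 of active_scan_mask map to the X, Y and Z channels. For illustration only (this is not the in-tree code, and the lis3l02dq_axis_enable[] table plus the loop variable i are hypothetical), the same CTRL_1 update could be written in a table-driven form:

/* Illustrative alternative; lis3l02dq_axis_enable[] is hypothetical. */
static const u8 lis3l02dq_axis_enable[] = {
	LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE,
	LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE,
	LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE,
};

for (i = 0; i < ARRAY_SIZE(lis3l02dq_axis_enable); i++) {
	if (test_bit(i, indio_dev->active_scan_mask)) {
		t |= lis3l02dq_axis_enable[i];
		oneenabled = true;
	} else {
		t &= ~lis3l02dq_axis_enable[i];
	}
}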
/* Turn all channels on again */
static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
u8 t;
int ret;
ret = iio_triggered_buffer_predisable(indio_dev);
if (ret)
goto error_ret;
ret = lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
&t);
if (ret)
goto error_ret;
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
ret = lis3l02dq_spi_write_reg_8(indio_dev,
LIS3L02DQ_REG_CTRL_1_ADDR,
t);
error_ret:
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 74 | 100.00% | 4 | 100.00% |
| Total | 74 | 100.00% | 4 | 100.00% |
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
.postenable = &lis3l02dq_buffer_postenable,
.predisable = &lis3l02dq_buffer_predisable,
};
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
int ret;
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate();
if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
buffer->scan_timestamp = true;
indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
/* Functions are NULL as we set handler below */
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&lis3l02dq_trigger_handler,
0,
indio_dev,
"lis3l02dq_consumer%d",
indio_dev->id);
if (!indio_dev->pollfunc) {
ret = -ENOMEM;
goto error_iio_sw_rb_free;
}
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
iio_kfifo_free(indio_dev->buffer);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 99 | 86.84% | 11 | 68.75% |
| manuel stahl | 9 | 7.89% | 2 | 12.50% |
| lars-peter clausen | 4 | 3.51% | 1 | 6.25% |
| karol wrona | 1 | 0.88% | 1 | 6.25% |
| cristina opriceana | 1 | 0.88% | 1 | 6.25% |
| Total | 114 | 100.00% | 16 | 100.00% |
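These buffer and trigger helpers are consumed from the core driver's probe path in lis3l02dq_core.c. The exact sequence lives there, but a rough sketch of the expected ordering, with IRQ checks and error unwinding simplified, is:

/* Rough probe-time ordering sketch; not copied from lis3l02dq_core.c. */
ret = lis3l02dq_configure_buffer(indio_dev);
if (ret)
	return ret;
ret = lis3l02dq_probe_trigger(indio_dev);
if (ret)
	goto error_unconfigure_buffer;
ret = iio_device_register(indio_dev);
if (ret)
	goto error_remove_trigger;
return 0;
error_remove_trigger:
	lis3l02dq_remove_trigger(indio_dev);
error_unconfigure_buffer:
	lis3l02dq_unconfigure_buffer(indio_dev);
	return ret;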
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jonathan cameron | 1635 | 94.73% | 27 | 51.92% |
| lars-peter clausen | 33 | 1.91% | 7 | 13.46% |
| peter meerwald | 14 | 0.81% | 3 | 5.77% |
| manuel stahl | 9 | 0.52% | 2 | 3.85% |
| ioana ciornei | 8 | 0.46% | 1 | 1.92% |
| haneen mohammed | 6 | 0.35% | 1 | 1.92% |
| cristina opriceana | 3 | 0.17% | 1 | 1.92% |
| tejun heo | 3 | 0.17% | 1 | 1.92% |
| paul gortmaker | 3 | 0.17% | 1 | 1.92% |
| thomas meyer | 2 | 0.12% | 1 | 1.92% |
| navya sri nizamkari | 2 | 0.12% | 1 | 1.92% |
| dan carpenter | 2 | 0.12% | 1 | 1.92% |
| cristina moraru | 2 | 0.12% | 1 | 1.92% |
| greg kroah-hartman | 1 | 0.06% | 1 | 1.92% |
| ebru akagunduz | 1 | 0.06% | 1 | 1.92% |
| karol wrona | 1 | 0.06% | 1 | 1.92% |
| arnd bergmann | 1 | 0.06% | 1 | 1.92% |
| Total | 1726 | 100.00% | 52 | 100.00% |