staging:iio: Add support for multiple buffers

Route all buffer writes through the demux. Addition or removal of a
buffer results in tear down and setup of all the buffers for a given
device.

Signed-off-by: Jonathan Cameron <jic23@kernel.org>
Tested-by: srinivas pandruvada <srinivas.pandruvada@intel.com>
commit 84b36ce5f7 (parent 4eb3ccf157)
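For a driver, the visible change is that a captured scan is no longer written into one specific buffer but handed to the core, which routes it through the demux to every buffer attached to the device. Below is a minimal sketch of a trigger handler after this patch; the driver name, state layout and header locations are illustrative assumptions, not taken from the patch.

/* Illustrative fragment only; "foo" is a made-up driver. */
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

struct foo_state {
        /* Room for the enabled channels plus an aligned s64 timestamp. */
        u8 scan[16];
};

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct foo_state *st = iio_priv(indio_dev);

        /* ... read the enabled channels into st->scan here ... */

        if (indio_dev->scan_timestamp)
                ((s64 *)st->scan)[1] = pf->timestamp;

        /* New API: one call, the core demuxes into every attached buffer. */
        iio_push_to_buffers(indio_dev, st->scan);

        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}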
@@ -197,21 +197,8 @@ static const struct iio_info accel_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }

 /* Callback handler to send event after all samples are received and captured */
@@ -91,7 +91,6 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
 {
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
-        struct iio_buffer *buffer = indio_dev->buffer;
         struct ad7266_state *st = iio_priv(indio_dev);
         int ret;

@@ -99,7 +98,7 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
         if (ret == 0) {
                 if (indio_dev->scan_timestamp)
                         ((s64 *)st->data)[1] = pf->timestamp;
-                iio_push_to_buffer(buffer, (u8 *)st->data);
+                iio_push_to_buffers(indio_dev, (u8 *)st->data);
         }

         iio_trigger_notify_done(indio_dev->trig);
@@ -76,7 +76,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 ((s64 *)st->data)[1] = time_ns;

-        iio_push_to_buffer(indio_dev->buffer, st->data);
+        iio_push_to_buffers(indio_dev, st->data);
 done:
         iio_trigger_notify_done(indio_dev->trig);

@@ -134,7 +134,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
                 memcpy(st->data + indio_dev->scan_bytes - sizeof(s64),
                        &time_ns, sizeof(time_ns));

-        iio_push_to_buffer(indio_dev->buffer, st->data);
+        iio_push_to_buffers(indio_dev, st->data);
 done:
         iio_trigger_notify_done(indio_dev->trig);

@@ -391,7 +391,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
                 break;
         }

-        iio_push_to_buffer(indio_dev->buffer, (uint8_t *)data);
+        iio_push_to_buffers(indio_dev, (uint8_t *)data);

         iio_trigger_notify_done(indio_dev->trig);
         sigma_delta->irq_dis = false;
@@ -65,7 +65,6 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *idev = pf->indio_dev;
         struct at91_adc_state *st = iio_priv(idev);
-        struct iio_buffer *buffer = idev->buffer;
         int i, j = 0;

         for (i = 0; i < idev->masklength; i++) {
@@ -81,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
                 *timestamp = pf->timestamp;
         }

-        iio_push_to_buffer(buffer, st->buffer);
+        iio_push_to_buffers(indio_dev, (u8 *)st->buffer);

         iio_trigger_notify_done(idev->trig);

@@ -197,21 +197,8 @@ static const struct iio_info gyro_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }

 /* Callback handler to send event after all samples are received and captured */
@@ -31,6 +31,18 @@ static const char * const iio_endian_prefix[] = {
         [IIO_LE] = "le",
 };

+static bool iio_buffer_is_active(struct iio_dev *indio_dev,
+                                 struct iio_buffer *buf)
+{
+        struct list_head *p;
+
+        list_for_each(p, &indio_dev->buffer_list)
+                if (p == &buf->buffer_list)
+                        return true;
+
+        return false;
+}
+
 /**
  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
  *
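The new helper answers one question: is this buffer currently a member of the device's list of attached buffers? An illustrative equivalent written with list_for_each_entry follows; it is not part of the patch, just a restatement of the same walk.

/* Illustrative only: same result as iio_buffer_is_active() above. */
static bool buffer_in_active_list(struct iio_dev *indio_dev,
                                  struct iio_buffer *buf)
{
        struct iio_buffer *b;

        list_for_each_entry(b, &indio_dev->buffer_list, buffer_list)
                if (b == buf)
                        return true;
        return false;
}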
@@ -134,7 +146,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
         if (ret < 0)
                 return ret;
         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
                 goto error_ret;
         }
@@ -180,12 +192,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
                 return ret;

         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
                 goto error_ret;
         }
         indio_dev->buffer->scan_timestamp = state;
-        indio_dev->scan_timestamp = state;
 error_ret:
         mutex_unlock(&indio_dev->mlock);

@@ -385,7 +396,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
                 return len;

         mutex_lock(&indio_dev->mlock);
-        if (iio_buffer_enabled(indio_dev)) {
+        if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
                 ret = -EBUSY;
         } else {
                 if (buffer->access->set_length)
@@ -398,102 +409,14 @@ ssize_t iio_buffer_write_length(struct device *dev,
 }
 EXPORT_SYMBOL(iio_buffer_write_length);

-ssize_t iio_buffer_store_enable(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t len)
-{
-        int ret;
-        bool requested_state, current_state;
-        int previous_mode;
-        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-        struct iio_buffer *buffer = indio_dev->buffer;
-
-        mutex_lock(&indio_dev->mlock);
-        previous_mode = indio_dev->currentmode;
-        requested_state = !(buf[0] == '0');
-        current_state = iio_buffer_enabled(indio_dev);
-        if (current_state == requested_state) {
-                printk(KERN_INFO "iio-buffer, current state requested again\n");
-                goto done;
-        }
-        if (requested_state) {
-                if (indio_dev->setup_ops->preenable) {
-                        ret = indio_dev->setup_ops->preenable(indio_dev);
-                        if (ret) {
-                                printk(KERN_ERR
-                                       "Buffer not started: "
-                                       "buffer preenable failed\n");
-                                goto error_ret;
-                        }
-                }
-                if (buffer->access->request_update) {
-                        ret = buffer->access->request_update(buffer);
-                        if (ret) {
-                                printk(KERN_INFO
-                                       "Buffer not started: "
-                                       "buffer parameter update failed\n");
-                                goto error_ret;
-                        }
-                }
-                /* Definitely possible for devices to support both of these. */
-                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
-                        if (!indio_dev->trig) {
-                                printk(KERN_INFO
-                                       "Buffer not started: no trigger\n");
-                                ret = -EINVAL;
-                                goto error_ret;
-                        }
-                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
-                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
-                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
-                else { /* should never be reached */
-                        ret = -EINVAL;
-                        goto error_ret;
-                }
-
-                if (indio_dev->setup_ops->postenable) {
-                        ret = indio_dev->setup_ops->postenable(indio_dev);
-                        if (ret) {
-                                printk(KERN_INFO
-                                       "Buffer not started: "
-                                       "postenable failed\n");
-                                indio_dev->currentmode = previous_mode;
-                                if (indio_dev->setup_ops->postdisable)
-                                        indio_dev->setup_ops->
-                                                postdisable(indio_dev);
-                                goto error_ret;
-                        }
-                }
-        } else {
-                if (indio_dev->setup_ops->predisable) {
-                        ret = indio_dev->setup_ops->predisable(indio_dev);
-                        if (ret)
-                                goto error_ret;
-                }
-                indio_dev->currentmode = INDIO_DIRECT_MODE;
-                if (indio_dev->setup_ops->postdisable) {
-                        ret = indio_dev->setup_ops->postdisable(indio_dev);
-                        if (ret)
-                                goto error_ret;
-                }
-        }
-done:
-        mutex_unlock(&indio_dev->mlock);
-        return len;
-
-error_ret:
-        mutex_unlock(&indio_dev->mlock);
-        return ret;
-}
-EXPORT_SYMBOL(iio_buffer_store_enable);
-
 ssize_t iio_buffer_show_enable(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
 {
         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-        return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
+        return sprintf(buf, "%d\n",
+                       iio_buffer_is_active(indio_dev,
+                                            indio_dev->buffer));
 }
 EXPORT_SYMBOL(iio_buffer_show_enable);

@@ -537,35 +460,220 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
         return bytes;
 }

-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+int iio_update_buffers(struct iio_dev *indio_dev,
+                       struct iio_buffer *insert_buffer,
+                       struct iio_buffer *remove_buffer)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        dev_dbg(&indio_dev->dev, "%s\n", __func__);
+        int ret;
+        int success = 0;
+        struct iio_buffer *buffer;
+        unsigned long *compound_mask;
+        const unsigned long *old_mask;

-        /* How much space will the demuxed element take? */
-        indio_dev->scan_bytes =
-                iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
-                                       buffer->scan_timestamp);
-        buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);
+        /* Wind down existing buffers - iff there are any */
+        if (!list_empty(&indio_dev->buffer_list)) {
+                if (indio_dev->setup_ops->predisable) {
+                        ret = indio_dev->setup_ops->predisable(indio_dev);
+                        if (ret)
+                                goto error_ret;
+                }
+                indio_dev->currentmode = INDIO_DIRECT_MODE;
+                if (indio_dev->setup_ops->postdisable) {
+                        ret = indio_dev->setup_ops->postdisable(indio_dev);
+                        if (ret)
+                                goto error_ret;
+                }
+        }
+        /* Keep a copy of current setup to allow roll back */
+        old_mask = indio_dev->active_scan_mask;
+        if (!indio_dev->available_scan_masks)
+                indio_dev->active_scan_mask = NULL;
+
+        if (remove_buffer)
+                list_del(&remove_buffer->buffer_list);
+        if (insert_buffer)
+                list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
+
+        /* If no buffers in list, we are done */
+        if (list_empty(&indio_dev->buffer_list)) {
+                indio_dev->currentmode = INDIO_DIRECT_MODE;
+                if (indio_dev->available_scan_masks == NULL)
+                        kfree(old_mask);
+                return 0;
+        }

         /* What scan mask do we actually have ?*/
-        if (indio_dev->available_scan_masks)
+        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                                sizeof(long), GFP_KERNEL);
+        if (compound_mask == NULL) {
+                if (indio_dev->available_scan_masks == NULL)
+                        kfree(old_mask);
+                return -ENOMEM;
+        }
+        indio_dev->scan_timestamp = 0;
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
+                          indio_dev->masklength);
+                indio_dev->scan_timestamp |= buffer->scan_timestamp;
+        }
+        if (indio_dev->available_scan_masks) {
                 indio_dev->active_scan_mask =
                         iio_scan_mask_match(indio_dev->available_scan_masks,
                                             indio_dev->masklength,
-                                            buffer->scan_mask);
-        else
-                indio_dev->active_scan_mask = buffer->scan_mask;
-
-        if (indio_dev->active_scan_mask == NULL)
-                return -EINVAL;
+                                            compound_mask);
+                if (indio_dev->active_scan_mask == NULL) {
+                        /*
+                         * Roll back.
+                         * Note can only occur when adding a buffer.
+                         */
+                        list_del(&insert_buffer->buffer_list);
+                        indio_dev->active_scan_mask = old_mask;
+                        success = -EINVAL;
+                }
+        } else {
+                indio_dev->active_scan_mask = compound_mask;
+        }

         iio_update_demux(indio_dev);

-        if (indio_dev->info->update_scan_mode)
-                return indio_dev->info
+        /* Wind up again */
+        if (indio_dev->setup_ops->preenable) {
+                ret = indio_dev->setup_ops->preenable(indio_dev);
+                if (ret) {
+                        printk(KERN_ERR
+                               "Buffer not started:"
+                               "buffer preenable failed\n");
+                        goto error_remove_inserted;
+                }
+        }
+        indio_dev->scan_bytes =
+                iio_compute_scan_bytes(indio_dev,
+                                       indio_dev->active_scan_mask,
+                                       indio_dev->scan_timestamp);
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                if (buffer->access->request_update) {
+                        ret = buffer->access->request_update(buffer);
+                        if (ret) {
+                                printk(KERN_INFO
+                                       "Buffer not started:"
+                                       "buffer parameter update failed\n");
+                                goto error_run_postdisable;
+                        }
+                }
+        if (indio_dev->info->update_scan_mode) {
+                ret = indio_dev->info
                         ->update_scan_mode(indio_dev,
                                            indio_dev->active_scan_mask);
+                if (ret < 0) {
+                        printk(KERN_INFO "update scan mode failed\n");
+                        goto error_run_postdisable;
+                }
+        }
+        /* Definitely possible for devices to support both of these.*/
+        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+                if (!indio_dev->trig) {
+                        printk(KERN_INFO "Buffer not started: no trigger\n");
+                        ret = -EINVAL;
+                        /* Can only occur on first buffer */
+                        goto error_run_postdisable;
+                }
+                indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
+                indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+        } else { /* should never be reached */
+                ret = -EINVAL;
+                goto error_run_postdisable;
+        }
+
+        if (indio_dev->setup_ops->postenable) {
+                ret = indio_dev->setup_ops->postenable(indio_dev);
+                if (ret) {
+                        printk(KERN_INFO
+                               "Buffer not started: postenable failed\n");
+                        indio_dev->currentmode = INDIO_DIRECT_MODE;
+                        if (indio_dev->setup_ops->postdisable)
+                                indio_dev->setup_ops->postdisable(indio_dev);
+                        goto error_disable_all_buffers;
+                }
+        }
+
+        if (indio_dev->available_scan_masks)
+                kfree(compound_mask);
+        else
+                kfree(old_mask);
+
+        return success;
+
+error_disable_all_buffers:
+        indio_dev->currentmode = INDIO_DIRECT_MODE;
+error_run_postdisable:
+        if (indio_dev->setup_ops->postdisable)
+                indio_dev->setup_ops->postdisable(indio_dev);
+error_remove_inserted:
+
+        if (insert_buffer)
+                list_del(&insert_buffer->buffer_list);
+        indio_dev->active_scan_mask = old_mask;
+        kfree(compound_mask);
+error_ret:
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_buffers);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf,
+                                size_t len)
+{
+        int ret;
+        bool requested_state;
+        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+        struct iio_buffer *pbuf = indio_dev->buffer;
+        bool inlist;
+
+        ret = strtobool(buf, &requested_state);
+        if (ret < 0)
+                return ret;
+
+        mutex_lock(&indio_dev->mlock);
+
+        /* Find out if it is in the list */
+        inlist = iio_buffer_is_active(indio_dev, pbuf);
+        /* Already in desired state */
+        if (inlist == requested_state)
+                goto done;
+
+        if (requested_state)
+                ret = iio_update_buffers(indio_dev,
+                                         indio_dev->buffer, NULL);
+        else
+                ret = iio_update_buffers(indio_dev,
+                                         NULL, indio_dev->buffer);
+
+        if (ret < 0)
+                goto done;
+done:
+        mutex_unlock(&indio_dev->mlock);
+        return (ret < 0) ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+        struct iio_buffer *buffer;
+        unsigned bytes;
+        dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                if (buffer->access->set_bytes_per_datum) {
+                        bytes = iio_compute_scan_bytes(indio_dev,
+                                                       buffer->scan_mask,
+                                                       buffer->scan_timestamp);
+
+                        buffer->access->set_bytes_per_datum(buffer, bytes);
+                }
         return 0;
 }
 EXPORT_SYMBOL(iio_sw_buffer_preenable);
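To make the compound-mask step above concrete: iio_update_buffers() ORs the scan mask of every attached buffer into the single mask the device actually captures. A small illustrative fragment follows; the channel numbers and mask width are invented, kernel context is assumed, and the masks are shortened to one word for clarity.

/* Hypothetical device: masklength = 3, no available_scan_masks restriction. */
unsigned long buf_a_mask = 0x5;   /* buffer A wants channels 0 and 2 */
unsigned long buf_b_mask = 0x2;   /* buffer B wants channel 1 */
unsigned long compound = 0;

bitmap_or(&compound, &compound, &buf_a_mask, 3);   /* compound = 0x5 */
bitmap_or(&compound, &compound, &buf_b_mask, 3);   /* compound = 0x7 */
/* The device is now programmed to capture channels 0, 1 and 2; the demux
 * later copies only {0, 2} into buffer A and only {1} into buffer B. */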
@@ -599,7 +707,11 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  * iio_scan_mask_set() - set particular bit in the scan mask
  * @buffer: the buffer whose scan mask we are interested in
  * @bit: the bit to be set.
- **/
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
 int iio_scan_mask_set(struct iio_dev *indio_dev,
                       struct iio_buffer *buffer, int bit)
 {
@@ -682,13 +794,12 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
         return buffer->demux_bounce;
 }

-int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
 {
         unsigned char *dataout = iio_demux(buffer, data);

         return buffer->access->store_to(buffer, dataout);
 }
-EXPORT_SYMBOL_GPL(iio_push_to_buffer);

 static void iio_buffer_demux_free(struct iio_buffer *buffer)
 {
@@ -699,10 +810,26 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
         }
 }

-int iio_update_demux(struct iio_dev *indio_dev)
+
+int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+{
+        int ret;
+        struct iio_buffer *buf;
+
+        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+                ret = iio_push_to_buffer(buf, data);
+                if (ret < 0)
+                        return ret;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers);
+
+static int iio_buffer_update_demux(struct iio_dev *indio_dev,
+                                   struct iio_buffer *buffer)
 {
         const struct iio_chan_spec *ch;
-        struct iio_buffer *buffer = indio_dev->buffer;
         int ret, in_ind = -1, out_ind, length;
         unsigned in_loc = 0, out_loc = 0;
         struct iio_demux_table *p;
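Every attached buffer now receives the same full scan; the per-buffer demux tables built here are what reduce it to the subset each buffer asked for. A purely illustrative layout, with channel sizes and masks invented:

/*
 * Hypothetical capture: three 16-bit channels plus a 64-bit timestamp,
 * compound scan mask 0x7 (every channel requested by some buffer):
 *
 *   full scan from driver:    | ch0 | ch1 | ch2 | pad | timestamp |
 *   buffer A, scan_mask 0x5:  | ch0 | ch2 | pad | timestamp |
 *   buffer B, scan_mask 0x2:  | ch1 | pad | timestamp |
 *
 * iio_push_to_buffers() walks indio_dev->buffer_list and, for each buffer,
 * iio_demux() gathers only the bytes that buffer asked for before the
 * buffer's store_to() callback is invoked.
 */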
@@ -787,4 +914,23 @@ int iio_update_demux(struct iio_dev *indio_dev)

         return ret;
 }

+int iio_update_demux(struct iio_dev *indio_dev)
+{
+        struct iio_buffer *buffer;
+        int ret;
+
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+                ret = iio_buffer_update_demux(indio_dev, buffer);
+                if (ret < 0)
+                        goto error_clear_mux_table;
+        }
+        return 0;
+
+error_clear_mux_table:
+        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+                iio_buffer_demux_free(buffer);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_demux);
@@ -856,6 +856,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
                         return NULL;
                 }
                 dev_set_name(&dev->dev, "iio:device%d", dev->id);
+                INIT_LIST_HEAD(&dev->buffer_list);
         }

         return dev;
@@ -164,7 +164,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
         struct adjd_s311_data *data = iio_priv(indio_dev);
-        struct iio_buffer *buffer = indio_dev->buffer;
         s64 time_ns = iio_get_time_ns();
         int len = 0;
         int i, j = 0;
@@ -187,7 +186,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *(s64 *)((u8 *)data->buffer + ALIGN(len, sizeof(s64)))
                         = time_ns;
-        iio_push_to_buffer(buffer, (u8 *)data->buffer);
+        iio_push_to_buffers(indio_dev, (u8 *)data->buffer);

 done:
         iio_trigger_notify_done(indio_dev->trig);
@@ -176,21 +176,8 @@ static const struct iio_info als_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }

 /* Callback handler to send event after all samples are received and captured */
@@ -198,21 +198,8 @@ static const struct iio_info magn_3d_info = {
 /* Function to push data to buffer */
 static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
 {
-        struct iio_buffer *buffer = indio_dev->buffer;
-        int datum_sz;
-
         dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
-        if (!buffer) {
-                dev_err(&indio_dev->dev, "Buffer == NULL\n");
-                return;
-        }
-        datum_sz = buffer->access->get_bytes_per_datum(buffer);
-        if (len > datum_sz) {
-                dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
-                        datum_sz);
-                return;
-        }
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);
 }

 /* Callback handler to send event after all samples are received and captured */
@@ -82,7 +82,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -81,7 +81,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -78,7 +78,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -78,7 +78,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -76,7 +76,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -154,7 +154,7 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
                         = pf->timestamp;
-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -93,7 +93,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
                          indio_dev->masklength); i++)
                 buf[i] = be16_to_cpu(st->rx_buf[i]);

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)buf);
+        iio_push_to_buffers(indio_dev, (u8 *)buf);

 done:
         iio_trigger_notify_done(indio_dev->trig);
@@ -83,7 +83,7 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(buf + indio_dev->scan_bytes - sizeof(s64))) = time_ns;

-        iio_push_to_buffer(indio_dev->buffer, buf);
+        iio_push_to_buffers(indio_dev, buf);
 done:
         gpio_set_value(st->pdata->gpio_convst, 0);
         iio_trigger_notify_done(indio_dev->trig);
@@ -77,7 +77,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
                 memcpy(rxbuf + indio_dev->scan_bytes - sizeof(s64),
                        &time_ns, sizeof(time_ns));

-        iio_push_to_buffer(indio_dev->buffer, rxbuf);
+        iio_push_to_buffers(indio_dev, rxbuf);
 done:
         kfree(rxbuf);
 out:
@@ -80,7 +80,7 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)

         if (indio_dev->scan_timestamp)
                 memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
-        iio_push_to_buffer(indio_dev->buffer, rxbuf);
+        iio_push_to_buffers(indio_dev, rxbuf);

 done_free:
         kfree(rxbuf);
@@ -237,7 +237,6 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *iio = pf->indio_dev;
         struct mxs_lradc *lradc = iio_priv(iio);
-        struct iio_buffer *buffer = iio->buffer;
         const uint32_t chan_value = LRADC_CH_ACCUMULATE |
                 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
         int i, j = 0;
@@ -256,7 +255,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
                 *timestamp = pf->timestamp;
         }

-        iio_push_to_buffer(buffer, (u8 *)lradc->buffer);
+        iio_push_to_buffers(iio, (u8 *)lradc->buffer);

         iio_trigger_notify_done(iio->trig);

@@ -81,7 +81,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);
 done:
@@ -46,7 +46,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
 {
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
-        struct iio_buffer *buffer = indio_dev->buffer;
         int len = 0;
         u16 *data;

@@ -76,7 +75,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
              i < bitmap_weight(indio_dev->active_scan_mask,
                                indio_dev->masklength);
              i++, j++) {
-                j = find_next_bit(buffer->scan_mask,
+                j = find_next_bit(indio_dev->active_scan_mask,
                                   indio_dev->masklength, j);
                 /* random access read from the 'device' */
                 data[i] = fakedata[j];
@@ -87,7 +86,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 *(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
                         = iio_get_time_ns();
-        iio_push_to_buffer(buffer, (u8 *)data);
+        iio_push_to_buffers(indio_dev, (u8 *)data);

         kfree(data);

@@ -647,7 +647,6 @@ static void ad5933_work(struct work_struct *work)
         struct ad5933_state *st = container_of(work,
                 struct ad5933_state, work.work);
         struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
-        struct iio_buffer *ring = indio_dev->buffer;
         signed short buf[2];
         unsigned char status;

@@ -677,8 +676,7 @@ static void ad5933_work(struct work_struct *work)
                 } else {
                         buf[0] = be16_to_cpu(buf[0]);
                 }
-                /* save datum to the ring */
-                iio_push_to_buffer(ring, (u8 *)buf);
+                iio_push_to_buffers(indio_dev, (u8 *)buf);
         } else {
                 /* no data available - try again later */
                 schedule_delayed_work(&st->work, st->poll_time_jiffies);
@@ -114,7 +114,6 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
         struct iio_poll_func *pf = p;
         struct iio_dev *indio_dev = pf->indio_dev;
         struct adis16400_state *st = iio_priv(indio_dev);
-        struct iio_buffer *ring = indio_dev->buffer;
         int i = 0, j, ret = 0;
         s16 *data;

@@ -148,9 +147,9 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
                 }
         }
         /* Guaranteed to be aligned with 8 byte boundary */
-        if (ring->scan_timestamp)
+        if (indio_dev->scan_timestamp)
                 *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;
-        iio_push_to_buffer(ring, (u8 *) data);
+        iio_push_to_buffers(indio_dev, (u8 *) data);

 done:
         kfree(data);
@@ -73,7 +73,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
         if (indio_dev->scan_timestamp)
                 dat64[1] = pf->timestamp;

-        iio_push_to_buffer(indio_dev->buffer, (u8 *)dat64);
+        iio_push_to_buffers(indio_dev, (u8 *)dat64);

         iio_trigger_notify_done(indio_dev->trig);

@@ -66,7 +66,8 @@ struct iio_buffer_access_funcs {
  * @stufftoread: [INTERN] flag to indicate new data.
  * @demux_list: [INTERN] list of operations required to demux the scan.
  * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
- **/
+ * @buffer_list: [INTERN] entry in the devices list of current buffers.
+ */
 struct iio_buffer {
         int length;
         int bytes_per_datum;
@@ -81,8 +82,21 @@ struct iio_buffer {
         const struct attribute_group *attrs;
         struct list_head demux_list;
         unsigned char *demux_bounce;
+        struct list_head buffer_list;
 };

+/**
+ * iio_update_buffers() - add or remove buffer from active list
+ * @indio_dev:          device to add buffer to
+ * @insert_buffer:      buffer to insert
+ * @remove_buffer:      buffer_to_remove
+ *
+ * Note this will tear down the all buffering and build it up again
+ */
+int iio_update_buffers(struct iio_dev *indio_dev,
+                       struct iio_buffer *insert_buffer,
+                       struct iio_buffer *remove_buffer);
+
 /**
  * iio_buffer_init() - Initialize the buffer structure
  * @buffer: buffer to be initialized
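From a caller's point of view the new interface is symmetric: pass a buffer as insert_buffer to attach it, or as remove_buffer to detach it. A hedged usage sketch follows; my_buf and where it comes from (kfifo, ring, etc.) are assumptions, only the iio_update_buffers() calls themselves come from this patch.

int ret;

/* Attach an additional buffer; the core tears down and rebuilds buffering. */
ret = iio_update_buffers(indio_dev, my_buf, NULL);
if (ret < 0)
        return ret;

/* ... capture runs, each scan is fanned out to every attached buffer ... */

/* Detach it again; passing NULL for insert_buffer only removes. */
ret = iio_update_buffers(indio_dev, NULL, my_buf);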
@@ -115,11 +129,11 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
                       struct iio_buffer *buffer, int bit);

 /**
- * iio_push_to_buffer() - push to a registered buffer.
- * @buffer: IIO buffer structure for device
- * @data: the data to push to the buffer
+ * iio_push_to_buffers() - push to a registered buffer.
+ * @indio_dev: iio_dev structure for device.
+ * @data: Full scan.
  */
-int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data);
+int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data);

 int iio_update_demux(struct iio_dev *indio_dev);

@@ -410,6 +410,7 @@ struct iio_buffer_setup_ops {
  *                      and owner
  * @event_interface:    [INTERN] event chrdevs associated with interrupt lines
  * @buffer:             [DRIVER] any buffer present
+ * @buffer_list:        [INTERN] list of all buffers currently attached
  * @scan_bytes:         [INTERN] num bytes captured to be fed to buffer demux
  * @mlock:              [INTERN] lock used to prevent simultaneous device state
  *                      changes
@@ -448,6 +449,7 @@ struct iio_dev {
         struct iio_event_interface *event_interface;

         struct iio_buffer *buffer;
+        struct list_head buffer_list;
         int scan_bytes;
         struct mutex mlock;
