// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 */
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}
ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}
__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
			iio_endian_prefix[type],
			this_attr->c->scan_type.sign,
			this_attr->c->scan_type.realbits,
			this_attr->c->scan_type.storagebits,
			this_attr->c->scan_type.repeat,
			this_attr->c->scan_type.shift);

	return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
			iio_endian_prefix[type],
			this_attr->c->scan_type.sign,
			this_attr->c->scan_type.realbits,
			this_attr->c->scan_type.storagebits,
			this_attr->c->scan_type.shift);
}
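/*
 * The string built above is what userspace sees in a scan element's "type"
 * attribute. For example, a little-endian, signed, 12-bit value stored in
 * 16 bits and shifted right by 4 reads "le:s12/16>>4"; with a repeat count
 * of 2 it becomes "le:s12/16X2>>4".
 */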
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sysfs_emit(buf, "%d\n", ret);
}
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
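/*
 * Example: available_scan_masks is laid out as consecutive bitmaps of
 * BITS_TO_LONGS(masklength) longs each, terminated by an empty entry.
 * A hypothetical driver (sketch, assuming masklength <= BITS_PER_LONG)
 * that can capture either channel 0 alone or channels 1 and 2 together:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0),			// channel 0 only
 *		BIT(1) | BIT(2),	// channels 1 and 2 together
 *		0,			// terminator
 *	};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */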
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}
static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}
static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}
static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}
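/*
 * Worked example for the arithmetic above: one 16-bit channel (2 storage
 * bytes) plus the 64-bit timestamp. The channel lands at offset 0 and
 * bytes becomes 2; the timestamp needs 8-byte alignment, so bytes is
 * padded from 2 to 8 before adding 8 more; the final ALIGN on the largest
 * element (8) leaves a 16-byte scan: [sample][6 bytes pad][timestamp].
 */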
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}
static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}
static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}
static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};
static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (!(*p))
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
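/*
 * Example of what the code above builds: if the device captures channels
 * {0, 2} (active_scan_mask) but this buffer enabled only channel 2
 * (scan_mask), a single iio_demux_table entry results, copying channel 2's
 * storage bytes from its aligned offset within the full scan to offset 0
 * of demux_bounce. Adjacent copies get merged into one entry by
 * iio_buffer_add_demux().
 */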
static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
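/*
 * Example: a driver that owns an extra buffer can swap it in and out at
 * runtime (sketch; "st->buffer" is a hypothetical driver-private buffer
 * that was attached with iio_device_attach_buffer() earlier):
 *
 *	ret = iio_update_buffers(indio_dev, st->buffer, NULL);	// enable
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, st->buffer);	// disable
 */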
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}
static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}
static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
	NULL,
};
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{
	struct device_attribute *dattr = to_dev_attr(attr);
	struct iio_dev_attr *iio_attr;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (!iio_attr)
		return NULL;

	iio_attr->buffer = buffer;
	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
	sysfs_attr_init(&iio_attr->dev_attr.attr);

	list_add(&iio_attr->l, &buffer->buffer_attr_list);

	return &iio_attr->dev_attr.attr;
}
static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);

	return ret;
}

static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;

	wake_up(&buffer->pollq);

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};
static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd, as there's not much we can do about this
		 * anyway. 'fd' might have been closed already, as
		 * anon_inode_getfd() called fd_install() on it, which made
		 * it reachable by userland.
		 */
		return -EFAULT;
	}

	return 0;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}
static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case IIO_BUFFER_GET_FD_IOCTL:
		return iio_device_buffer_getfd(indio_dev, arg);
	default:
		return IIO_IOCTL_UNHANDLED;
	}
}
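/*
 * Example: userspace requests a dedicated fd for an attached buffer via
 * the main chardev (sketch; device path and index are assumptions):
 *
 *	int buf_idx = 1;	// in: index into the attached buffers
 *	int fd = open("/dev/iio:device0", O_RDWR);
 *
 *	if (ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_idx) == 0)
 *		read(buf_idx, scan, sizeof(scan));	// out: now an fd
 *
 * Note the same int carries the buffer index in and the new fd out.
 */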
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount] != NULL)
			buffer_attrcount++;
	}

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				iio_dev_opaque->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * buffer_attrcount);

	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);

	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_scan_mask;
		}
		attr[i] = wrapped;
	}

	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	buffer->buffer_group.attrs = attr;

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}
static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.name);
	kfree(buffer->buffer_group.attrs);
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}
int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_chan_spec *channels;
	struct iio_buffer *buffer;
	int unwind_idx;
	int ret, i;
	size_t sz;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!iio_dev_opaque->attached_buffers_cnt)
		return 0;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
		if (ret) {
			unwind_idx = i - 1;
			goto error_unwind_sysfs_and_mask;
		}
	}
	unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;

	sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
	if (!iio_dev_opaque->buffer_ioctl_handler) {
		ret = -ENOMEM;
		goto error_unwind_sysfs_and_mask;
	}

	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
	iio_device_ioctl_handler_register(indio_dev,
					  iio_dev_opaque->buffer_ioctl_handler);

	return 0;

error_unwind_sysfs_and_mask:
	for (; unwind_idx >= 0; unwind_idx--) {
		buffer = iio_dev_opaque->attached_buffers[unwind_idx];
		__iio_buffer_free_sysfs_and_mask(buffer);
	}
	return ret;
}
void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int i;

	if (!iio_dev_opaque->attached_buffers_cnt)
		return;

	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
	kfree(iio_dev_opaque->buffer_ioctl_handler);

	iio_buffer_unregister_legacy_sysfs_groups(indio_dev);

	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
		buffer = iio_dev_opaque->attached_buffers[i];
		__iio_buffer_free_sysfs_and_mask(buffer);
	}
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
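/*
 * Example: a device that can sample only one channel at a time plugs this
 * helper into its setup ops (sketch; "my_setup_ops" is hypothetical):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */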
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}
/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
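/*
 * Example: a triggered-buffer driver typically calls this from the bottom
 * half of its pollfunc (sketch; "st" and its scan layout are hypothetical
 * and must match what iio_compute_scan_bytes() derived):
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		// ...read the enabled channels into st->scan...
 *		iio_push_to_buffers(indio_dev, &st->scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */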
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
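/*
 * Example: code that stashes a buffer pointer beyond the current call
 * chain pairs these helpers (sketch):
 *
 *	st->buffer = iio_buffer_get(buffer);	// take our own reference
 *	...
 *	iio_buffer_put(st->buffer);		// drop it when done
 */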
/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return 0 if successful, negative if error.
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. For legacy reasons, the first
 * attached buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here will be freed via the iio_device_detach_buffers()
 * call, which is handled by iio_device_free().
 */
int iio_device_attach_buffer(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;

	cnt++;

	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	iio_dev_opaque->attached_buffers = new;

	buffer = iio_buffer_get(buffer);

	/* first buffer is legacy; attach it to the IIO device directly */
	if (!indio_dev->buffer)
		indio_dev->buffer = buffer;

	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
	iio_dev_opaque->attached_buffers_cnt = cnt;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
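/*
 * Example: buffer implementations call this while setting a device up,
 * before registration (sketch, loosely modeled on what a kfifo-style
 * setup helper does; error handling abbreviated):
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 *	indio_dev->setup_ops = setup_ops;
 *
 *	return iio_device_attach_buffer(indio_dev, buffer);
 */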