drivers/iio/industrialio-buffer.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/anon_inodes.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/device.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/cdev.h>
19 #include <linux/slab.h>
20 #include <linux/poll.h>
21 #include <linux/sched/signal.h>
22
23 #include <linux/iio/iio.h>
24 #include <linux/iio/iio-opaque.h>
25 #include "iio_core.h"
26 #include "iio_core_trigger.h"
27 #include <linux/iio/sysfs.h>
28 #include <linux/iio/buffer.h>
29 #include <linux/iio/buffer_impl.h>
30
31 static const char * const iio_endian_prefix[] = {
32         [IIO_BE] = "be",
33         [IIO_LE] = "le",
34 };
35
36 static bool iio_buffer_is_active(struct iio_buffer *buf)
37 {
38         return !list_empty(&buf->buffer_list);
39 }
40
41 static size_t iio_buffer_data_available(struct iio_buffer *buf)
42 {
43         return buf->access->data_available(buf);
44 }
45
46 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
47                                    struct iio_buffer *buf, size_t required)
48 {
49         if (!indio_dev->info->hwfifo_flush_to_buffer)
50                 return -ENODEV;
51
52         return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
53 }
54
55 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
56                              size_t to_wait, int to_flush)
57 {
58         size_t avail;
59         int flushed = 0;
60
61         /* wakeup if the device was unregistered */
62         if (!indio_dev->info)
63                 return true;
64
65         /* drain the buffer if it was disabled */
66         if (!iio_buffer_is_active(buf)) {
67                 to_wait = min_t(size_t, to_wait, 1);
68                 to_flush = 0;
69         }
70
71         avail = iio_buffer_data_available(buf);
72
73         if (avail >= to_wait) {
74                 /* force a flush for non-blocking reads */
75                 if (!to_wait && avail < to_flush)
76                         iio_buffer_flush_hwfifo(indio_dev, buf,
77                                                 to_flush - avail);
78                 return true;
79         }
80
81         if (to_flush)
82                 flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
83                                                   to_wait - avail);
84         if (flushed <= 0)
85                 return false;
86
87         if (avail + flushed >= to_wait)
88                 return true;
89
90         return false;
91 }
92
93 /**
94  * iio_buffer_read() - chrdev read for buffer access
95  * @filp:       File structure pointer for the char device
96  * @buf:        Destination buffer for iio buffer read
97  * @n:          Maximum number of bytes to read
98  * @f_ps:       Long offset provided by the user as a seek position
99  *
100  * This function relies on all buffer implementations having an
101  * iio_buffer as their first element.
102  *
103  * Return: number of bytes read on success (zero signals end of file),
104  *         or a negative error code
105  **/
106 static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
107                                size_t n, loff_t *f_ps)
108 {
109         struct iio_dev_buffer_pair *ib = filp->private_data;
110         struct iio_buffer *rb = ib->buffer;
111         struct iio_dev *indio_dev = ib->indio_dev;
112         DEFINE_WAIT_FUNC(wait, woken_wake_function);
113         size_t datum_size;
114         size_t to_wait;
115         int ret = 0;
116
117         if (!indio_dev->info)
118                 return -ENODEV;
119
120         if (!rb || !rb->access->read)
121                 return -EINVAL;
122
123         datum_size = rb->bytes_per_datum;
124
125         /*
126          * If datum_size is 0 there will never be anything to read from the
127          * buffer, so signal end of file now.
128          */
129         if (!datum_size)
130                 return 0;
131
132         if (filp->f_flags & O_NONBLOCK)
133                 to_wait = 0;
134         else
135                 to_wait = min_t(size_t, n / datum_size, rb->watermark);
136
137         add_wait_queue(&rb->pollq, &wait);
138         do {
139                 if (!indio_dev->info) {
140                         ret = -ENODEV;
141                         break;
142                 }
143
144                 if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
145                         if (signal_pending(current)) {
146                                 ret = -ERESTARTSYS;
147                                 break;
148                         }
149
150                         wait_woken(&wait, TASK_INTERRUPTIBLE,
151                                    MAX_SCHEDULE_TIMEOUT);
152                         continue;
153                 }
154
155                 ret = rb->access->read(rb, n, buf);
156                 if (ret == 0 && (filp->f_flags & O_NONBLOCK))
157                         ret = -EAGAIN;
158         } while (ret == 0);
159         remove_wait_queue(&rb->pollq, &wait);
160
161         return ret;
162 }
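
/*
 * Illustrative userspace sketch (not part of this file): once a scan is
 * enabled, whole scans can be read from the character device served by the
 * read() handler above. The device path and scan size are assumptions made
 * purely for the example; a real reader derives the layout from the
 * scan element attributes.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[64];
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *	// on success n is typically a multiple of bytes_per_datum; with
 *	// O_NONBLOCK, read() fails with EAGAIN when no data is buffered
 */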
163
164 /**
165  * iio_buffer_poll() - poll the buffer to find out if it has data
166  * @filp:       File structure pointer for device access
167  * @wait:       Poll table structure pointer for which the driver adds
168  *              a wait queue
169  *
170  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
171  *         or 0 for other cases
172  */
173 static __poll_t iio_buffer_poll(struct file *filp,
174                                 struct poll_table_struct *wait)
175 {
176         struct iio_dev_buffer_pair *ib = filp->private_data;
177         struct iio_buffer *rb = ib->buffer;
178         struct iio_dev *indio_dev = ib->indio_dev;
179
180         if (!indio_dev->info || rb == NULL)
181                 return 0;
182
183         poll_wait(filp, &rb->pollq, wait);
184         if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
185                 return EPOLLIN | EPOLLRDNORM;
186         return 0;
187 }
188
189 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
190                                 size_t n, loff_t *f_ps)
191 {
192         struct iio_dev_buffer_pair *ib = filp->private_data;
193         struct iio_buffer *rb = ib->buffer;
194
195         /* check if buffer was opened through new API */
196         if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
197                 return -EBUSY;
198
199         return iio_buffer_read(filp, buf, n, f_ps);
200 }
201
202 __poll_t iio_buffer_poll_wrapper(struct file *filp,
203                                  struct poll_table_struct *wait)
204 {
205         struct iio_dev_buffer_pair *ib = filp->private_data;
206         struct iio_buffer *rb = ib->buffer;
207
208         /* check if buffer was opened through new API */
209         if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
210                 return 0;
211
212         return iio_buffer_poll(filp, wait);
213 }
214
215 /**
216  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
217  * @indio_dev: The IIO device
218  *
219  * Wakes up the buffer waitqueues used for poll(). Should usually
220  * be called when the device is unregistered.
221  */
222 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
223 {
224         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
225         struct iio_buffer *buffer;
226         unsigned int i;
227
228         for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
229                 buffer = iio_dev_opaque->attached_buffers[i];
230                 wake_up(&buffer->pollq);
231         }
232 }
233
234 void iio_buffer_init(struct iio_buffer *buffer)
235 {
236         INIT_LIST_HEAD(&buffer->demux_list);
237         INIT_LIST_HEAD(&buffer->buffer_list);
238         init_waitqueue_head(&buffer->pollq);
239         kref_init(&buffer->ref);
240         if (!buffer->watermark)
241                 buffer->watermark = 1;
242 }
243 EXPORT_SYMBOL(iio_buffer_init);
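
/*
 * Minimal sketch, under assumed names, of how a buffer implementation
 * typically calls iio_buffer_init() from its allocation routine (modelled
 * loosely on the kfifo buffer): embed a struct iio_buffer, initialise it,
 * then hook up the access functions.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;
 *		// implementation specific state follows
 *	};
 *
 *	struct iio_buffer *my_buffer_alloc(void)
 *	{
 *		struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *
 *		if (!b)
 *			return NULL;
 *		iio_buffer_init(&b->buffer);
 *		b->buffer.access = &my_buffer_access_funcs;
 *		return &b->buffer;
 *	}
 */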
244
245 void iio_device_detach_buffers(struct iio_dev *indio_dev)
246 {
247         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
248         struct iio_buffer *buffer;
249         unsigned int i;
250
251         for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
252                 buffer = iio_dev_opaque->attached_buffers[i];
253                 iio_buffer_put(buffer);
254         }
255
256         kfree(iio_dev_opaque->attached_buffers);
257 }
258
259 static ssize_t iio_show_scan_index(struct device *dev,
260                                    struct device_attribute *attr,
261                                    char *buf)
262 {
263         return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
264 }
265
266 static ssize_t iio_show_fixed_type(struct device *dev,
267                                    struct device_attribute *attr,
268                                    char *buf)
269 {
270         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
271         u8 type = this_attr->c->scan_type.endianness;
272
273         if (type == IIO_CPU) {
274 #ifdef __LITTLE_ENDIAN
275                 type = IIO_LE;
276 #else
277                 type = IIO_BE;
278 #endif
279         }
280         if (this_attr->c->scan_type.repeat > 1)
281                 return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
282                        iio_endian_prefix[type],
283                        this_attr->c->scan_type.sign,
284                        this_attr->c->scan_type.realbits,
285                        this_attr->c->scan_type.storagebits,
286                        this_attr->c->scan_type.repeat,
287                        this_attr->c->scan_type.shift);
288         else
289                 return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
290                        iio_endian_prefix[type],
291                        this_attr->c->scan_type.sign,
292                        this_attr->c->scan_type.realbits,
293                        this_attr->c->scan_type.storagebits,
294                        this_attr->c->scan_type.shift);
295 }
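
/*
 * The string built above follows the scan element "type" format,
 * [be|le]:[s|u]bits/storagebits[Xrepeat]>>shift. For example, a signed
 * 12-bit sample stored little-endian in 16 bits and shifted down by 4
 * reads back as "le:s12/16>>4".
 */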
296
297 static ssize_t iio_scan_el_show(struct device *dev,
298                                 struct device_attribute *attr,
299                                 char *buf)
300 {
301         int ret;
302         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
303
304         /* Ensure ret is 0 or 1. */
305         ret = !!test_bit(to_iio_dev_attr(attr)->address,
306                        buffer->scan_mask);
307
308         return sysfs_emit(buf, "%d\n", ret);
309 }
310
311 /* Note: NULL is used as the error indicator, as a NULL mask can never be valid. */
312 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
313                                           unsigned int masklength,
314                                           const unsigned long *mask,
315                                           bool strict)
316 {
317         if (bitmap_empty(mask, masklength))
318                 return NULL;
319         while (*av_masks) {
320                 if (strict) {
321                         if (bitmap_equal(mask, av_masks, masklength))
322                                 return av_masks;
323                 } else {
324                         if (bitmap_subset(mask, av_masks, masklength))
325                                 return av_masks;
326                 }
327                 av_masks += BITS_TO_LONGS(masklength);
328         }
329         return NULL;
330 }
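
/*
 * Example of the layout a driver might hand over in available_scan_masks
 * and that is matched against here (illustrative only; with masklength no
 * larger than BITS_PER_LONG each mask occupies a single long and the array
 * is terminated by an all-zero entry):
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),		// channels 0 and 1 together
 *		BIT(0) | BIT(1) | BIT(2),	// or all three
 *		0,				// terminator
 *	};
 */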
331
332 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
333         const unsigned long *mask)
334 {
335         if (!indio_dev->setup_ops->validate_scan_mask)
336                 return true;
337
338         return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
339 }
340
341 /**
342  * iio_scan_mask_set() - set particular bit in the scan mask
343  * @indio_dev: the iio device
344  * @buffer: the buffer whose scan mask we are interested in
345  * @bit: the bit to be set.
346  *
347  * Note that at this point we have no way of knowing what other
348  * buffers might request, hence this code only verifies that the
349  * individual buffer's request is plausible.
350  */
351 static int iio_scan_mask_set(struct iio_dev *indio_dev,
352                       struct iio_buffer *buffer, int bit)
353 {
354         const unsigned long *mask;
355         unsigned long *trialmask;
356
357         trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
358         if (trialmask == NULL)
359                 return -ENOMEM;
360         if (!indio_dev->masklength) {
361                 WARN(1, "Trying to set scanmask prior to registering buffer\n");
362                 goto err_invalid_mask;
363         }
364         bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
365         set_bit(bit, trialmask);
366
367         if (!iio_validate_scan_mask(indio_dev, trialmask))
368                 goto err_invalid_mask;
369
370         if (indio_dev->available_scan_masks) {
371                 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
372                                            indio_dev->masklength,
373                                            trialmask, false);
374                 if (!mask)
375                         goto err_invalid_mask;
376         }
377         bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
378
379         bitmap_free(trialmask);
380
381         return 0;
382
383 err_invalid_mask:
384         bitmap_free(trialmask);
385         return -EINVAL;
386 }
387
388 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
389 {
390         clear_bit(bit, buffer->scan_mask);
391         return 0;
392 }
393
394 static int iio_scan_mask_query(struct iio_dev *indio_dev,
395                                struct iio_buffer *buffer, int bit)
396 {
397         if (bit >= indio_dev->masklength)
398                 return -EINVAL;
399
400         if (!buffer->scan_mask)
401                 return 0;
402
403         /* Ensure return value is 0 or 1. */
404         return !!test_bit(bit, buffer->scan_mask);
405 }
406
407 static ssize_t iio_scan_el_store(struct device *dev,
408                                  struct device_attribute *attr,
409                                  const char *buf,
410                                  size_t len)
411 {
412         int ret;
413         bool state;
414         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
415         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
416         struct iio_buffer *buffer = this_attr->buffer;
417
418         ret = strtobool(buf, &state);
419         if (ret < 0)
420                 return ret;
421         mutex_lock(&indio_dev->mlock);
422         if (iio_buffer_is_active(buffer)) {
423                 ret = -EBUSY;
424                 goto error_ret;
425         }
426         ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
427         if (ret < 0)
428                 goto error_ret;
429         if (!state && ret) {
430                 ret = iio_scan_mask_clear(buffer, this_attr->address);
431                 if (ret)
432                         goto error_ret;
433         } else if (state && !ret) {
434                 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
435                 if (ret)
436                         goto error_ret;
437         }
438
439 error_ret:
440         mutex_unlock(&indio_dev->mlock);
441
442         return ret < 0 ? ret : len;
443
444 }
445
446 static ssize_t iio_scan_el_ts_show(struct device *dev,
447                                    struct device_attribute *attr,
448                                    char *buf)
449 {
450         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
451
452         return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
453 }
454
455 static ssize_t iio_scan_el_ts_store(struct device *dev,
456                                     struct device_attribute *attr,
457                                     const char *buf,
458                                     size_t len)
459 {
460         int ret;
461         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
462         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
463         bool state;
464
465         ret = strtobool(buf, &state);
466         if (ret < 0)
467                 return ret;
468
469         mutex_lock(&indio_dev->mlock);
470         if (iio_buffer_is_active(buffer)) {
471                 ret = -EBUSY;
472                 goto error_ret;
473         }
474         buffer->scan_timestamp = state;
475 error_ret:
476         mutex_unlock(&indio_dev->mlock);
477
478         return ret ? ret : len;
479 }
480
481 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
482                                         struct iio_buffer *buffer,
483                                         const struct iio_chan_spec *chan)
484 {
485         int ret, attrcount = 0;
486
487         ret = __iio_add_chan_devattr("index",
488                                      chan,
489                                      &iio_show_scan_index,
490                                      NULL,
491                                      0,
492                                      IIO_SEPARATE,
493                                      &indio_dev->dev,
494                                      buffer,
495                                      &buffer->buffer_attr_list);
496         if (ret)
497                 return ret;
498         attrcount++;
499         ret = __iio_add_chan_devattr("type",
500                                      chan,
501                                      &iio_show_fixed_type,
502                                      NULL,
503                                      0,
504                                      0,
505                                      &indio_dev->dev,
506                                      buffer,
507                                      &buffer->buffer_attr_list);
508         if (ret)
509                 return ret;
510         attrcount++;
511         if (chan->type != IIO_TIMESTAMP)
512                 ret = __iio_add_chan_devattr("en",
513                                              chan,
514                                              &iio_scan_el_show,
515                                              &iio_scan_el_store,
516                                              chan->scan_index,
517                                              0,
518                                              &indio_dev->dev,
519                                              buffer,
520                                              &buffer->buffer_attr_list);
521         else
522                 ret = __iio_add_chan_devattr("en",
523                                              chan,
524                                              &iio_scan_el_ts_show,
525                                              &iio_scan_el_ts_store,
526                                              chan->scan_index,
527                                              0,
528                                              &indio_dev->dev,
529                                              buffer,
530                                              &buffer->buffer_attr_list);
531         if (ret)
532                 return ret;
533         attrcount++;
534         ret = attrcount;
535         return ret;
536 }
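
/*
 * For example, an indexed voltage channel with scan_index 0 picks up
 * in_voltage0_index, in_voltage0_type and in_voltage0_en attributes in the
 * per-buffer group assembled here (mirrored under the legacy scan_elements
 * directory for the first buffer).
 */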
537
538 static ssize_t iio_buffer_read_length(struct device *dev,
539                                       struct device_attribute *attr,
540                                       char *buf)
541 {
542         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
543
544         return sysfs_emit(buf, "%d\n", buffer->length);
545 }
546
547 static ssize_t iio_buffer_write_length(struct device *dev,
548                                        struct device_attribute *attr,
549                                        const char *buf, size_t len)
550 {
551         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
552         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
553         unsigned int val;
554         int ret;
555
556         ret = kstrtouint(buf, 10, &val);
557         if (ret)
558                 return ret;
559
560         if (val == buffer->length)
561                 return len;
562
563         mutex_lock(&indio_dev->mlock);
564         if (iio_buffer_is_active(buffer)) {
565                 ret = -EBUSY;
566         } else {
567                 buffer->access->set_length(buffer, val);
568                 ret = 0;
569         }
570         if (ret)
571                 goto out;
572         if (buffer->length && buffer->length < buffer->watermark)
573                 buffer->watermark = buffer->length;
574 out:
575         mutex_unlock(&indio_dev->mlock);
576
577         return ret ? ret : len;
578 }
579
580 static ssize_t iio_buffer_show_enable(struct device *dev,
581                                       struct device_attribute *attr,
582                                       char *buf)
583 {
584         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
585
586         return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
587 }
588
589 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
590                                              unsigned int scan_index)
591 {
592         const struct iio_chan_spec *ch;
593         unsigned int bytes;
594
595         ch = iio_find_channel_from_si(indio_dev, scan_index);
596         bytes = ch->scan_type.storagebits / 8;
597         if (ch->scan_type.repeat > 1)
598                 bytes *= ch->scan_type.repeat;
599         return bytes;
600 }
601
602 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
603 {
604         return iio_storage_bytes_for_si(indio_dev,
605                                         indio_dev->scan_index_timestamp);
606 }
607
608 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
609                                 const unsigned long *mask, bool timestamp)
610 {
611         unsigned bytes = 0;
612         int length, i, largest = 0;
613
614         /* How much space will the demuxed element take? */
615         for_each_set_bit(i, mask,
616                          indio_dev->masklength) {
617                 length = iio_storage_bytes_for_si(indio_dev, i);
618                 bytes = ALIGN(bytes, length);
619                 bytes += length;
620                 largest = max(largest, length);
621         }
622
623         if (timestamp) {
624                 length = iio_storage_bytes_for_timestamp(indio_dev);
625                 bytes = ALIGN(bytes, length);
626                 bytes += length;
627                 largest = max(largest, length);
628         }
629
630         bytes = ALIGN(bytes, largest);
631         return bytes;
632 }
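
/*
 * Worked example of the packing above (channel sizes assumed purely for
 * illustration): two enabled channels of 16 and 32 storage bits plus a
 * 64-bit timestamp give 2 bytes at offset 0, 4 bytes at offset 4 after
 * alignment, and 8 bytes at offset 8; the final ALIGN to the largest
 * element leaves the scan at 16 bytes.
 */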
633
634 static void iio_buffer_activate(struct iio_dev *indio_dev,
635         struct iio_buffer *buffer)
636 {
637         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
638
639         iio_buffer_get(buffer);
640         list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
641 }
642
643 static void iio_buffer_deactivate(struct iio_buffer *buffer)
644 {
645         list_del_init(&buffer->buffer_list);
646         wake_up_interruptible(&buffer->pollq);
647         iio_buffer_put(buffer);
648 }
649
650 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
651 {
652         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
653         struct iio_buffer *buffer, *_buffer;
654
655         list_for_each_entry_safe(buffer, _buffer,
656                         &iio_dev_opaque->buffer_list, buffer_list)
657                 iio_buffer_deactivate(buffer);
658 }
659
660 static int iio_buffer_enable(struct iio_buffer *buffer,
661         struct iio_dev *indio_dev)
662 {
663         if (!buffer->access->enable)
664                 return 0;
665         return buffer->access->enable(buffer, indio_dev);
666 }
667
668 static int iio_buffer_disable(struct iio_buffer *buffer,
669         struct iio_dev *indio_dev)
670 {
671         if (!buffer->access->disable)
672                 return 0;
673         return buffer->access->disable(buffer, indio_dev);
674 }
675
676 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
677         struct iio_buffer *buffer)
678 {
679         unsigned int bytes;
680
681         if (!buffer->access->set_bytes_per_datum)
682                 return;
683
684         bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
685                 buffer->scan_timestamp);
686
687         buffer->access->set_bytes_per_datum(buffer, bytes);
688 }
689
690 static int iio_buffer_request_update(struct iio_dev *indio_dev,
691         struct iio_buffer *buffer)
692 {
693         int ret;
694
695         iio_buffer_update_bytes_per_datum(indio_dev, buffer);
696         if (buffer->access->request_update) {
697                 ret = buffer->access->request_update(buffer);
698                 if (ret) {
699                         dev_dbg(&indio_dev->dev,
700                                "Buffer not started: buffer parameter update failed (%d)\n",
701                                 ret);
702                         return ret;
703                 }
704         }
705
706         return 0;
707 }
708
709 static void iio_free_scan_mask(struct iio_dev *indio_dev,
710         const unsigned long *mask)
711 {
712         /* If the mask is dynamically allocated free it, otherwise do nothing */
713         if (!indio_dev->available_scan_masks)
714                 bitmap_free(mask);
715 }
716
717 struct iio_device_config {
718         unsigned int mode;
719         unsigned int watermark;
720         const unsigned long *scan_mask;
721         unsigned int scan_bytes;
722         bool scan_timestamp;
723 };
724
725 static int iio_verify_update(struct iio_dev *indio_dev,
726         struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
727         struct iio_device_config *config)
728 {
729         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
730         unsigned long *compound_mask;
731         const unsigned long *scan_mask;
732         bool strict_scanmask = false;
733         struct iio_buffer *buffer;
734         bool scan_timestamp;
735         unsigned int modes;
736
737         if (insert_buffer &&
738             bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
739                 dev_dbg(&indio_dev->dev,
740                         "At least one scan element must be enabled first\n");
741                 return -EINVAL;
742         }
743
744         memset(config, 0, sizeof(*config));
745         config->watermark = ~0;
746
747         /*
748          * If there is just one buffer and we are removing it there is nothing
749          * to verify.
750          */
751         if (remove_buffer && !insert_buffer &&
752             list_is_singular(&iio_dev_opaque->buffer_list))
753                 return 0;
754
755         modes = indio_dev->modes;
756
757         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
758                 if (buffer == remove_buffer)
759                         continue;
760                 modes &= buffer->access->modes;
761                 config->watermark = min(config->watermark, buffer->watermark);
762         }
763
764         if (insert_buffer) {
765                 modes &= insert_buffer->access->modes;
766                 config->watermark = min(config->watermark,
767                         insert_buffer->watermark);
768         }
769
770         /* A device may support more than one of these modes; pick in priority order. */
771         if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
772                 config->mode = INDIO_BUFFER_TRIGGERED;
773         } else if (modes & INDIO_BUFFER_HARDWARE) {
774                 /*
775                  * Keep things simple for now and only allow a single buffer to
776                  * be connected in hardware mode.
777                  */
778                 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
779                         return -EINVAL;
780                 config->mode = INDIO_BUFFER_HARDWARE;
781                 strict_scanmask = true;
782         } else if (modes & INDIO_BUFFER_SOFTWARE) {
783                 config->mode = INDIO_BUFFER_SOFTWARE;
784         } else {
785                 /* Can only occur on first buffer */
786                 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
787                         dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
788                 return -EINVAL;
789         }
790
791         /* What scan mask do we actually have? */
792         compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
793         if (compound_mask == NULL)
794                 return -ENOMEM;
795
796         scan_timestamp = false;
797
798         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
799                 if (buffer == remove_buffer)
800                         continue;
801                 bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
802                           indio_dev->masklength);
803                 scan_timestamp |= buffer->scan_timestamp;
804         }
805
806         if (insert_buffer) {
807                 bitmap_or(compound_mask, compound_mask,
808                           insert_buffer->scan_mask, indio_dev->masklength);
809                 scan_timestamp |= insert_buffer->scan_timestamp;
810         }
811
812         if (indio_dev->available_scan_masks) {
813                 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
814                                     indio_dev->masklength,
815                                     compound_mask,
816                                     strict_scanmask);
817                 bitmap_free(compound_mask);
818                 if (scan_mask == NULL)
819                         return -EINVAL;
820         } else {
821                 scan_mask = compound_mask;
822         }
823
824         config->scan_bytes = iio_compute_scan_bytes(indio_dev,
825                                     scan_mask, scan_timestamp);
826         config->scan_mask = scan_mask;
827         config->scan_timestamp = scan_timestamp;
828
829         return 0;
830 }
831
832 /**
833  * struct iio_demux_table - table describing demux memcpy ops
834  * @from:       index to copy from
835  * @to:         index to copy to
836  * @length:     how many bytes to copy
837  * @l:          list head used for management
838  */
839 struct iio_demux_table {
840         unsigned from;
841         unsigned to;
842         unsigned length;
843         struct list_head l;
844 };
845
846 static void iio_buffer_demux_free(struct iio_buffer *buffer)
847 {
848         struct iio_demux_table *p, *q;
849         list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
850                 list_del(&p->l);
851                 kfree(p);
852         }
853 }
854
855 static int iio_buffer_add_demux(struct iio_buffer *buffer,
856         struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
857         unsigned int length)
858 {
859
860         if (*p && (*p)->from + (*p)->length == in_loc &&
861                 (*p)->to + (*p)->length == out_loc) {
862                 (*p)->length += length;
863         } else {
864                 *p = kmalloc(sizeof(**p), GFP_KERNEL);
865                 if (*p == NULL)
866                         return -ENOMEM;
867                 (*p)->from = in_loc;
868                 (*p)->to = out_loc;
869                 (*p)->length = length;
870                 list_add_tail(&(*p)->l, &buffer->demux_list);
871         }
872
873         return 0;
874 }
875
876 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
877                                    struct iio_buffer *buffer)
878 {
879         int ret, in_ind = -1, out_ind, length;
880         unsigned in_loc = 0, out_loc = 0;
881         struct iio_demux_table *p = NULL;
882
883         /* Clear out any old demux */
884         iio_buffer_demux_free(buffer);
885         kfree(buffer->demux_bounce);
886         buffer->demux_bounce = NULL;
887
888         /* First work out which scan mode we will actually have */
889         if (bitmap_equal(indio_dev->active_scan_mask,
890                          buffer->scan_mask,
891                          indio_dev->masklength))
892                 return 0;
893
894         /* Now we have the two masks, work up from the least significant bit and build the sizes */
895         for_each_set_bit(out_ind,
896                          buffer->scan_mask,
897                          indio_dev->masklength) {
898                 in_ind = find_next_bit(indio_dev->active_scan_mask,
899                                        indio_dev->masklength,
900                                        in_ind + 1);
901                 while (in_ind != out_ind) {
902                         length = iio_storage_bytes_for_si(indio_dev, in_ind);
903                         /* Make sure we are aligned */
904                         in_loc = roundup(in_loc, length) + length;
905                         in_ind = find_next_bit(indio_dev->active_scan_mask,
906                                                indio_dev->masklength,
907                                                in_ind + 1);
908                 }
909                 length = iio_storage_bytes_for_si(indio_dev, in_ind);
910                 out_loc = roundup(out_loc, length);
911                 in_loc = roundup(in_loc, length);
912                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
913                 if (ret)
914                         goto error_clear_mux_table;
915                 out_loc += length;
916                 in_loc += length;
917         }
918         /* Relies on scan_timestamp being last */
919         if (buffer->scan_timestamp) {
920                 length = iio_storage_bytes_for_timestamp(indio_dev);
921                 out_loc = roundup(out_loc, length);
922                 in_loc = roundup(in_loc, length);
923                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
924                 if (ret)
925                         goto error_clear_mux_table;
926                 out_loc += length;
927                 in_loc += length;
928         }
929         buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
930         if (buffer->demux_bounce == NULL) {
931                 ret = -ENOMEM;
932                 goto error_clear_mux_table;
933         }
934         return 0;
935
936 error_clear_mux_table:
937         iio_buffer_demux_free(buffer);
938
939         return ret;
940 }
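
/*
 * Worked example (layout assumed for illustration): with an active scan
 * mask covering channels {0, 1, 2}, each stored in 16 bits, a buffer that
 * only enabled {0, 2} plus the 64-bit timestamp ends up with three copy
 * operations, (from 0, to 0, len 2), (from 4, to 2, len 2) and
 * (from 8, to 8, len 8), and a 16 byte demux_bounce area.
 */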
941
942 static int iio_update_demux(struct iio_dev *indio_dev)
943 {
944         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
945         struct iio_buffer *buffer;
946         int ret;
947
948         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
949                 ret = iio_buffer_update_demux(indio_dev, buffer);
950                 if (ret < 0)
951                         goto error_clear_mux_table;
952         }
953         return 0;
954
955 error_clear_mux_table:
956         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
957                 iio_buffer_demux_free(buffer);
958
959         return ret;
960 }
961
962 static int iio_enable_buffers(struct iio_dev *indio_dev,
963         struct iio_device_config *config)
964 {
965         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
966         struct iio_buffer *buffer;
967         int ret;
968
969         indio_dev->active_scan_mask = config->scan_mask;
970         indio_dev->scan_timestamp = config->scan_timestamp;
971         indio_dev->scan_bytes = config->scan_bytes;
972         indio_dev->currentmode = config->mode;
973
974         iio_update_demux(indio_dev);
975
976         /* Wind up again */
977         if (indio_dev->setup_ops->preenable) {
978                 ret = indio_dev->setup_ops->preenable(indio_dev);
979                 if (ret) {
980                         dev_dbg(&indio_dev->dev,
981                                "Buffer not started: buffer preenable failed (%d)\n", ret);
982                         goto err_undo_config;
983                 }
984         }
985
986         if (indio_dev->info->update_scan_mode) {
987                 ret = indio_dev->info
988                         ->update_scan_mode(indio_dev,
989                                            indio_dev->active_scan_mask);
990                 if (ret < 0) {
991                         dev_dbg(&indio_dev->dev,
992                                 "Buffer not started: update scan mode failed (%d)\n",
993                                 ret);
994                         goto err_run_postdisable;
995                 }
996         }
997
998         if (indio_dev->info->hwfifo_set_watermark)
999                 indio_dev->info->hwfifo_set_watermark(indio_dev,
1000                         config->watermark);
1001
1002         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1003                 ret = iio_buffer_enable(buffer, indio_dev);
1004                 if (ret)
1005                         goto err_disable_buffers;
1006         }
1007
1008         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1009                 ret = iio_trigger_attach_poll_func(indio_dev->trig,
1010                                                    indio_dev->pollfunc);
1011                 if (ret)
1012                         goto err_disable_buffers;
1013         }
1014
1015         if (indio_dev->setup_ops->postenable) {
1016                 ret = indio_dev->setup_ops->postenable(indio_dev);
1017                 if (ret) {
1018                         dev_dbg(&indio_dev->dev,
1019                                "Buffer not started: postenable failed (%d)\n", ret);
1020                         goto err_detach_pollfunc;
1021                 }
1022         }
1023
1024         return 0;
1025
1026 err_detach_pollfunc:
1027         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1028                 iio_trigger_detach_poll_func(indio_dev->trig,
1029                                              indio_dev->pollfunc);
1030         }
1031 err_disable_buffers:
1032         list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1033                                              buffer_list)
1034                 iio_buffer_disable(buffer, indio_dev);
1035 err_run_postdisable:
1036         if (indio_dev->setup_ops->postdisable)
1037                 indio_dev->setup_ops->postdisable(indio_dev);
1038 err_undo_config:
1039         indio_dev->currentmode = INDIO_DIRECT_MODE;
1040         indio_dev->active_scan_mask = NULL;
1041
1042         return ret;
1043 }
1044
1045 static int iio_disable_buffers(struct iio_dev *indio_dev)
1046 {
1047         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1048         struct iio_buffer *buffer;
1049         int ret = 0;
1050         int ret2;
1051
1052         /* Wind down existing buffers - iff there are any */
1053         if (list_empty(&iio_dev_opaque->buffer_list))
1054                 return 0;
1055
1056         /*
1057          * If things go wrong at some step in disable we still need to continue
1058          * to perform the other steps, otherwise we leave the device in an
1059          * inconsistent state. We return the error code for the first error we
1060          * encountered.
1061          */
1062
1063         if (indio_dev->setup_ops->predisable) {
1064                 ret2 = indio_dev->setup_ops->predisable(indio_dev);
1065                 if (ret2 && !ret)
1066                         ret = ret2;
1067         }
1068
1069         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1070                 iio_trigger_detach_poll_func(indio_dev->trig,
1071                                              indio_dev->pollfunc);
1072         }
1073
1074         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1075                 ret2 = iio_buffer_disable(buffer, indio_dev);
1076                 if (ret2 && !ret)
1077                         ret = ret2;
1078         }
1079
1080         if (indio_dev->setup_ops->postdisable) {
1081                 ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1082                 if (ret2 && !ret)
1083                         ret = ret2;
1084         }
1085
1086         iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1087         indio_dev->active_scan_mask = NULL;
1088         indio_dev->currentmode = INDIO_DIRECT_MODE;
1089
1090         return ret;
1091 }
1092
1093 static int __iio_update_buffers(struct iio_dev *indio_dev,
1094                        struct iio_buffer *insert_buffer,
1095                        struct iio_buffer *remove_buffer)
1096 {
1097         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1098         struct iio_device_config new_config;
1099         int ret;
1100
1101         ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1102                 &new_config);
1103         if (ret)
1104                 return ret;
1105
1106         if (insert_buffer) {
1107                 ret = iio_buffer_request_update(indio_dev, insert_buffer);
1108                 if (ret)
1109                         goto err_free_config;
1110         }
1111
1112         ret = iio_disable_buffers(indio_dev);
1113         if (ret)
1114                 goto err_deactivate_all;
1115
1116         if (remove_buffer)
1117                 iio_buffer_deactivate(remove_buffer);
1118         if (insert_buffer)
1119                 iio_buffer_activate(indio_dev, insert_buffer);
1120
1121         /* If no buffers in list, we are done */
1122         if (list_empty(&iio_dev_opaque->buffer_list))
1123                 return 0;
1124
1125         ret = iio_enable_buffers(indio_dev, &new_config);
1126         if (ret)
1127                 goto err_deactivate_all;
1128
1129         return 0;
1130
1131 err_deactivate_all:
1132         /*
1133          * We've already verified that the config is valid earlier. If things go
1134          * wrong in either enable or disable the most likely reason is an IO
1135          * error from the device. In this case there is no good recovery
1136          * strategy. Just make sure to disable everything and leave the device
1137          * in a sane state.  With a bit of luck the device might come back to
1138          * life again later and userspace can try again.
1139          */
1140         iio_buffer_deactivate_all(indio_dev);
1141
1142 err_free_config:
1143         iio_free_scan_mask(indio_dev, new_config.scan_mask);
1144         return ret;
1145 }
1146
1147 int iio_update_buffers(struct iio_dev *indio_dev,
1148                        struct iio_buffer *insert_buffer,
1149                        struct iio_buffer *remove_buffer)
1150 {
1151         int ret;
1152
1153         if (insert_buffer == remove_buffer)
1154                 return 0;
1155
1156         mutex_lock(&indio_dev->info_exist_lock);
1157         mutex_lock(&indio_dev->mlock);
1158
1159         if (insert_buffer && iio_buffer_is_active(insert_buffer))
1160                 insert_buffer = NULL;
1161
1162         if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1163                 remove_buffer = NULL;
1164
1165         if (!insert_buffer && !remove_buffer) {
1166                 ret = 0;
1167                 goto out_unlock;
1168         }
1169
1170         if (indio_dev->info == NULL) {
1171                 ret = -ENODEV;
1172                 goto out_unlock;
1173         }
1174
1175         ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1176
1177 out_unlock:
1178         mutex_unlock(&indio_dev->mlock);
1179         mutex_unlock(&indio_dev->info_exist_lock);
1180
1181         return ret;
1182 }
1183 EXPORT_SYMBOL_GPL(iio_update_buffers);
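
/*
 * Hedged sketch of an in-kernel user (modelled loosely on the callback
 * buffer helpers, names illustrative): a consumer that allocated an extra
 * buffer attaches and later detaches it with the same call.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// start
 *	if (ret)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);		// stop
 */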
1184
1185 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1186 {
1187         iio_disable_buffers(indio_dev);
1188         iio_buffer_deactivate_all(indio_dev);
1189 }
1190
1191 static ssize_t iio_buffer_store_enable(struct device *dev,
1192                                        struct device_attribute *attr,
1193                                        const char *buf,
1194                                        size_t len)
1195 {
1196         int ret;
1197         bool requested_state;
1198         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1199         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1200         bool inlist;
1201
1202         ret = strtobool(buf, &requested_state);
1203         if (ret < 0)
1204                 return ret;
1205
1206         mutex_lock(&indio_dev->mlock);
1207
1208         /* Find out if it is in the list */
1209         inlist = iio_buffer_is_active(buffer);
1210         /* Already in desired state */
1211         if (inlist == requested_state)
1212                 goto done;
1213
1214         if (requested_state)
1215                 ret = __iio_update_buffers(indio_dev, buffer, NULL);
1216         else
1217                 ret = __iio_update_buffers(indio_dev, NULL, buffer);
1218
1219 done:
1220         mutex_unlock(&indio_dev->mlock);
1221         return (ret < 0) ? ret : len;
1222 }
1223
1224 static ssize_t iio_buffer_show_watermark(struct device *dev,
1225                                          struct device_attribute *attr,
1226                                          char *buf)
1227 {
1228         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1229
1230         return sysfs_emit(buf, "%u\n", buffer->watermark);
1231 }
1232
1233 static ssize_t iio_buffer_store_watermark(struct device *dev,
1234                                           struct device_attribute *attr,
1235                                           const char *buf,
1236                                           size_t len)
1237 {
1238         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1239         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1240         unsigned int val;
1241         int ret;
1242
1243         ret = kstrtouint(buf, 10, &val);
1244         if (ret)
1245                 return ret;
1246         if (!val)
1247                 return -EINVAL;
1248
1249         mutex_lock(&indio_dev->mlock);
1250
1251         if (val > buffer->length) {
1252                 ret = -EINVAL;
1253                 goto out;
1254         }
1255
1256         if (iio_buffer_is_active(buffer)) {
1257                 ret = -EBUSY;
1258                 goto out;
1259         }
1260
1261         buffer->watermark = val;
1262 out:
1263         mutex_unlock(&indio_dev->mlock);
1264
1265         return ret ? ret : len;
1266 }
1267
1268 static ssize_t iio_dma_show_data_available(struct device *dev,
1269                                                 struct device_attribute *attr,
1270                                                 char *buf)
1271 {
1272         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1273
1274         return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1275 }
1276
1277 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
1278                    iio_buffer_write_length);
1279 static struct device_attribute dev_attr_length_ro = __ATTR(length,
1280         S_IRUGO, iio_buffer_read_length, NULL);
1281 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
1282                    iio_buffer_show_enable, iio_buffer_store_enable);
1283 static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
1284                    iio_buffer_show_watermark, iio_buffer_store_watermark);
1285 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
1286         S_IRUGO, iio_buffer_show_watermark, NULL);
1287 static DEVICE_ATTR(data_available, S_IRUGO,
1288                 iio_dma_show_data_available, NULL);
1289
1290 static struct attribute *iio_buffer_attrs[] = {
1291         &dev_attr_length.attr,
1292         &dev_attr_enable.attr,
1293         &dev_attr_watermark.attr,
1294         &dev_attr_data_available.attr,
1295 };
1296
1297 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1298
1299 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1300                                               struct attribute *attr)
1301 {
1302         struct device_attribute *dattr = to_dev_attr(attr);
1303         struct iio_dev_attr *iio_attr;
1304
1305         iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1306         if (!iio_attr)
1307                 return NULL;
1308
1309         iio_attr->buffer = buffer;
1310         memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1311         iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1312         sysfs_attr_init(&iio_attr->dev_attr.attr);
1313
1314         list_add(&iio_attr->l, &buffer->buffer_attr_list);
1315
1316         return &iio_attr->dev_attr.attr;
1317 }
1318
1319 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1320                                                    struct attribute **buffer_attrs,
1321                                                    int buffer_attrcount,
1322                                                    int scan_el_attrcount)
1323 {
1324         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1325         struct attribute_group *group;
1326         struct attribute **attrs;
1327         int ret;
1328
1329         attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1330         if (!attrs)
1331                 return -ENOMEM;
1332
1333         memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1334
1335         group = &iio_dev_opaque->legacy_buffer_group;
1336         group->attrs = attrs;
1337         group->name = "buffer";
1338
1339         ret = iio_device_register_sysfs_group(indio_dev, group);
1340         if (ret)
1341                 goto error_free_buffer_attrs;
1342
1343         attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1344         if (!attrs) {
1345                 ret = -ENOMEM;
1346                 goto error_free_buffer_attrs;
1347         }
1348
1349         memcpy(attrs, &buffer_attrs[buffer_attrcount],
1350                scan_el_attrcount * sizeof(*attrs));
1351
1352         group = &iio_dev_opaque->legacy_scan_el_group;
1353         group->attrs = attrs;
1354         group->name = "scan_elements";
1355
1356         ret = iio_device_register_sysfs_group(indio_dev, group);
1357         if (ret)
1358                 goto error_free_scan_el_attrs;
1359
1360         return 0;
1361
1362 error_free_buffer_attrs:
1363         kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1364 error_free_scan_el_attrs:
1365         kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1366
1367         return ret;
1368 }
1369
1370 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1371 {
1372         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1373
1374         kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1375         kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1376 }
1377
1378 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1379 {
1380         struct iio_dev_buffer_pair *ib = filep->private_data;
1381         struct iio_dev *indio_dev = ib->indio_dev;
1382         struct iio_buffer *buffer = ib->buffer;
1383
1384         wake_up(&buffer->pollq);
1385
1386         kfree(ib);
1387         clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1388         iio_device_put(indio_dev);
1389
1390         return 0;
1391 }
1392
1393 static const struct file_operations iio_buffer_chrdev_fileops = {
1394         .owner = THIS_MODULE,
1395         .llseek = noop_llseek,
1396         .read = iio_buffer_read,
1397         .poll = iio_buffer_poll,
1398         .release = iio_buffer_chrdev_release,
1399 };
1400
1401 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1402 {
1403         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1404         int __user *ival = (int __user *)arg;
1405         struct iio_dev_buffer_pair *ib;
1406         struct iio_buffer *buffer;
1407         int fd, idx, ret;
1408
1409         if (copy_from_user(&idx, ival, sizeof(idx)))
1410                 return -EFAULT;
1411
1412         if (idx >= iio_dev_opaque->attached_buffers_cnt)
1413                 return -ENODEV;
1414
1415         iio_device_get(indio_dev);
1416
1417         buffer = iio_dev_opaque->attached_buffers[idx];
1418
1419         if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1420                 ret = -EBUSY;
1421                 goto error_iio_dev_put;
1422         }
1423
1424         ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1425         if (!ib) {
1426                 ret = -ENOMEM;
1427                 goto error_clear_busy_bit;
1428         }
1429
1430         ib->indio_dev = indio_dev;
1431         ib->buffer = buffer;
1432
1433         fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1434                               ib, O_RDWR | O_CLOEXEC);
1435         if (fd < 0) {
1436                 ret = fd;
1437                 goto error_free_ib;
1438         }
1439
1440         if (copy_to_user(ival, &fd, sizeof(fd))) {
1441                 /* The fd is already installed and visible to userspace, so don't */
1442                 /* unwind it here; rely on process exit to release it if needed. */
1443                 return -EFAULT;
1444         }
1445
1446         return 0;
1447
1448 error_free_ib:
1449         kfree(ib);
1450 error_clear_busy_bit:
1451         clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1452 error_iio_dev_put:
1453         iio_device_put(indio_dev);
1454         return ret;
1455 }
1456
1457 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1458                                     unsigned int cmd, unsigned long arg)
1459 {
1460         switch (cmd) {
1461         case IIO_BUFFER_GET_FD_IOCTL:
1462                 return iio_device_buffer_getfd(indio_dev, arg);
1463         default:
1464                 return IIO_IOCTL_UNHANDLED;
1465         }
1466 }
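
/*
 * Illustrative userspace use of IIO_BUFFER_GET_FD_IOCTL (declared in the
 * uapi <linux/iio/buffer.h> header): the requested buffer index goes in
 * and the anonymous-inode fd comes back through the same int.
 *
 *	int idx_or_fd = 1;	// ask for buffer1
 *	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &idx_or_fd) == 0)
 *		read(idx_or_fd, scan, sizeof(scan));
 */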
1467
1468 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
1469                                              struct iio_dev *indio_dev,
1470                                              int index)
1471 {
1472         struct iio_dev_attr *p;
1473         struct attribute **attr;
1474         int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
1475         const struct iio_chan_spec *channels;
1476
1477         buffer_attrcount = 0;
1478         if (buffer->attrs) {
1479                 while (buffer->attrs[buffer_attrcount] != NULL)
1480                         buffer_attrcount++;
1481         }
1482
1483         scan_el_attrcount = 0;
1484         INIT_LIST_HEAD(&buffer->buffer_attr_list);
1485         channels = indio_dev->channels;
1486         if (channels) {
1487                 /* Create scan element attributes for each channel with a valid scan index */
1488                 for (i = 0; i < indio_dev->num_channels; i++) {
1489                         if (channels[i].scan_index < 0)
1490                                 continue;
1491
1492                         ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1493                                                          &channels[i]);
1494                         if (ret < 0)
1495                                 goto error_cleanup_dynamic;
1496                         scan_el_attrcount += ret;
1497                         if (channels[i].type == IIO_TIMESTAMP)
1498                                 indio_dev->scan_index_timestamp =
1499                                         channels[i].scan_index;
1500                 }
1501                 if (indio_dev->masklength && buffer->scan_mask == NULL) {
1502                         buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1503                                                           GFP_KERNEL);
1504                         if (buffer->scan_mask == NULL) {
1505                                 ret = -ENOMEM;
1506                                 goto error_cleanup_dynamic;
1507                         }
1508                 }
1509         }
1510
1511         attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
1512         attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
1513         if (!attr) {
1514                 ret = -ENOMEM;
1515                 goto error_free_scan_mask;
1516         }
1517
1518         memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1519         if (!buffer->access->set_length)
1520                 attr[0] = &dev_attr_length_ro.attr;
1521
1522         if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1523                 attr[2] = &dev_attr_watermark_ro.attr;
1524
1525         if (buffer->attrs)
1526                 memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
1527                        sizeof(struct attribute *) * buffer_attrcount);
1528
        buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
        /* assign now so every later error path can free the attribute array */
        buffer->buffer_group.attrs = attr;

1531         for (i = 0; i < buffer_attrcount; i++) {
1532                 struct attribute *wrapped;
1533
1534                 wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1535                 if (!wrapped) {
1536                         ret = -ENOMEM;
1537                         goto error_free_buffer_attrs;
1538                 }
1539                 attr[i] = wrapped;
1540         }
1541
1542         attrn = 0;
1543         list_for_each_entry(p, &buffer->buffer_attr_list, l)
1544                 attr[attrn++] = &p->dev_attr.attr;
1545
1546         buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1547         if (!buffer->buffer_group.name) {
1548                 ret = -ENOMEM;
1549                 goto error_free_buffer_attrs;
1550         }
1551
1554         ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1555         if (ret)
1556                 goto error_free_buffer_attr_group_name;
1557
1558         /* we only need to register the legacy groups for the first buffer */
1559         if (index > 0)
1560                 return 0;
1561
1562         ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1563                                                       buffer_attrcount,
1564                                                       scan_el_attrcount);
1565         if (ret)
1566                 goto error_free_buffer_attr_group_name;
1567
1568         return 0;
1569
1570 error_free_buffer_attr_group_name:
1571         kfree(buffer->buffer_group.name);
1572 error_free_buffer_attrs:
1573         kfree(buffer->buffer_group.attrs);
1574 error_free_scan_mask:
1575         bitmap_free(buffer->scan_mask);
1576 error_cleanup_dynamic:
1577         iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1578
1579         return ret;
1580 }
1581
1582 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
1583 {
1584         bitmap_free(buffer->scan_mask);
1585         kfree(buffer->buffer_group.name);
1586         kfree(buffer->buffer_group.attrs);
1587         iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1588 }
1589
1590 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1591 {
1592         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1593         const struct iio_chan_spec *channels;
1594         struct iio_buffer *buffer;
1595         int unwind_idx;
1596         int ret, i;
1597         size_t sz;
1598
1599         channels = indio_dev->channels;
1600         if (channels) {
1601                 int ml = indio_dev->masklength;
1602
1603                 for (i = 0; i < indio_dev->num_channels; i++)
1604                         ml = max(ml, channels[i].scan_index + 1);
1605                 indio_dev->masklength = ml;
1606         }
1607
1608         if (!iio_dev_opaque->attached_buffers_cnt)
1609                 return 0;
1610
1611         for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
1612                 buffer = iio_dev_opaque->attached_buffers[i];
1613                 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
1614                 if (ret) {
1615                         unwind_idx = i;
1616                         goto error_unwind_sysfs_and_mask;
1617                 }
1618         }
1619         unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
1620
1621         sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
1622         iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1623         if (!iio_dev_opaque->buffer_ioctl_handler) {
1624                 ret = -ENOMEM;
1625                 goto error_unwind_sysfs_and_mask;
1626         }
1627
1628         iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1629         iio_device_ioctl_handler_register(indio_dev,
1630                                           iio_dev_opaque->buffer_ioctl_handler);
1631
1632         return 0;
1633
1634 error_unwind_sysfs_and_mask:
1635         for (; unwind_idx >= 0; unwind_idx--) {
1636                 buffer = iio_dev_opaque->attached_buffers[unwind_idx];
1637                 __iio_buffer_free_sysfs_and_mask(buffer);
1638         }
1639         return ret;
1640 }
1641
1642 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
1643 {
1644         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1645         struct iio_buffer *buffer;
1646         int i;
1647
1648         if (!iio_dev_opaque->attached_buffers_cnt)
1649                 return;
1650
1651         iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1652         kfree(iio_dev_opaque->buffer_ioctl_handler);
1653
1654         iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1655
1656         for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1657                 buffer = iio_dev_opaque->attached_buffers[i];
1658                 __iio_buffer_free_sysfs_and_mask(buffer);
1659         }
1660 }
1661
1662 /**
1663  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1664  * @indio_dev: the iio device
1665  * @mask: scan mask to be checked
1666  *
1667  * Return: true if exactly one bit is set in the scan mask, false otherwise.
1668  * This helper can be used for devices where only one channel can be active
1669  * for sampling at a time.
1670  */
1671 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1672         const unsigned long *mask)
1673 {
1674         return bitmap_weight(mask, indio_dev->masklength) == 1;
1675 }
1676 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
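
/*
 * Example: a sketch of how a driver that can only sample a single channel at
 * a time might wire this helper into its buffer setup ops; the ops name below
 * is an assumption for illustration.
 *
 *	static const struct iio_buffer_setup_ops my_adc_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */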
1677
1678 static const void *iio_demux(struct iio_buffer *buffer,
1679                                  const void *datain)
1680 {
1681         struct iio_demux_table *t;
1682
1683         if (list_empty(&buffer->demux_list))
1684                 return datain;
1685         list_for_each_entry(t, &buffer->demux_list, l)
1686                 memcpy(buffer->demux_bounce + t->to,
1687                        datain + t->from, t->length);
1688
1689         return buffer->demux_bounce;
1690 }
1691
1692 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1693 {
1694         const void *dataout = iio_demux(buffer, data);
1695         int ret;
1696
1697         ret = buffer->access->store_to(buffer, dataout);
1698         if (ret)
1699                 return ret;
1700
1701         /*
1702          * We can't just test for watermark to decide if we wake the poll queue
1703          * because a read may request fewer samples than the watermark.
1704          */
1705         wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1706         return 0;
1707 }
1708
1709 /**
1710  * iio_push_to_buffers() - push to a registered buffer.
1711  * @indio_dev:          iio_dev structure for device.
1712  * @data:               Full scan.
1713  */
1714 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1715 {
1716         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1717         int ret;
1718         struct iio_buffer *buf;
1719
1720         list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1721                 ret = iio_push_to_buffer(buf, data);
1722                 if (ret < 0)
1723                         return ret;
1724         }
1725
1726         return 0;
1727 }
1728 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
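
/*
 * Example: the usual producer side of this call, a pollfunc handler from a
 * hypothetical driver pushing one complete scan.  Everything named my_* is an
 * assumption for illustration; iio_poll_func comes from
 * <linux/iio/trigger_consumer.h>, and iio_push_to_buffers_with_timestamp()
 * (declared in <linux/iio/buffer.h>) appends the timestamp before calling
 * iio_push_to_buffers().
 *
 *	static irqreturn_t my_adc_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_adc_state *st = iio_priv(indio_dev);
 *
 *		// st->scan: enabled channel samples followed by room for the
 *		// naturally aligned s64 timestamp
 *		my_adc_read_scan(st, st->scan.data);
 *		iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
 *						   pf->timestamp);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */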
1729
1730 /**
1731  * iio_buffer_release() - Free a buffer's resources
1732  * @ref: Pointer to the kref embedded in the iio_buffer struct
1733  *
1734  * This function is called when the last reference to the buffer has been
1735  * dropped. It will typically free all resources allocated by the buffer. Do not
1736  * call this function manually, always use iio_buffer_put() when done using a
1737  * buffer.
1738  */
1739 static void iio_buffer_release(struct kref *ref)
1740 {
1741         struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1742
1743         buffer->access->release(buffer);
1744 }
1745
1746 /**
1747  * iio_buffer_get() - Grab a reference to the buffer
1748  * @buffer: The buffer to grab a reference for, may be NULL
1749  *
1750  * Returns the pointer to the buffer that was passed into the function.
1751  */
1752 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1753 {
1754         if (buffer)
1755                 kref_get(&buffer->ref);
1756
1757         return buffer;
1758 }
1759 EXPORT_SYMBOL_GPL(iio_buffer_get);
1760
1761 /**
1762  * iio_buffer_put() - Release the reference to the buffer
1763  * @buffer: The buffer to release the reference for, may be NULL
1764  */
1765 void iio_buffer_put(struct iio_buffer *buffer)
1766 {
1767         if (buffer)
1768                 kref_put(&buffer->ref, iio_buffer_release);
1769 }
1770 EXPORT_SYMBOL_GPL(iio_buffer_put);
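
/*
 * Example: a consumer that stores a buffer pointer beyond the call that
 * handed it over takes its own reference and drops it when done (my_consumer
 * is an assumption for illustration).
 *
 *	my_consumer->buffer = iio_buffer_get(buffer);
 *	...
 *	iio_buffer_put(my_consumer->buffer);
 *	my_consumer->buffer = NULL;
 */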
1771
1772 /**
1773  * iio_device_attach_buffer - Attach a buffer to an IIO device
1774  * @indio_dev: The device the buffer should be attached to
1775  * @buffer: The buffer to attach to the device
1776  *
1777  * Return: 0 on success, or a negative error code on failure.
1778  *
1779  * This function attaches a buffer to an IIO device. The buffer stays attached
1780  * to the device until the device is freed. For legacy reasons, the first
1781  * attached buffer will also be assigned to 'indio_dev->buffer'.
1782  * The array allocated here will be freed by the iio_device_detach_buffers()
1783  * call, which is handled by iio_device_free().
1784  */
1785 int iio_device_attach_buffer(struct iio_dev *indio_dev,
1786                              struct iio_buffer *buffer)
1787 {
1788         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1789         struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
1790         unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
1791
1792         cnt++;
1793
1794         new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
1795         if (!new)
1796                 return -ENOMEM;
1797         iio_dev_opaque->attached_buffers = new;
1798
1799         buffer = iio_buffer_get(buffer);
1800
1801         /* first buffer is legacy; attach it to the IIO device directly */
1802         if (!indio_dev->buffer)
1803                 indio_dev->buffer = buffer;
1804
1805         iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
1806         iio_dev_opaque->attached_buffers_cnt = cnt;
1807
1808         return 0;
1809 }
1810 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
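
/*
 * Example: a sketch of a hypothetical driver attaching a plain kfifo buffer
 * by hand; most drivers instead use helpers such as
 * iio_triggered_buffer_setup(), which call this function internally.
 * iio_kfifo_allocate()/iio_kfifo_free() come from <linux/iio/kfifo_buf.h>.
 *
 *	struct iio_buffer *buffer;
 *	int ret;
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *	if (ret) {
 *		iio_kfifo_free(buffer);
 *		return ret;
 *	}
 *	// the allocator's reference is dropped with iio_kfifo_free() on
 *	// remove; the reference taken by the attach is dropped when the
 *	// IIO device itself is freed
 */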