1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/anon_inodes.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/device.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/cdev.h>
19 #include <linux/slab.h>
20 #include <linux/poll.h>
21 #include <linux/sched/signal.h>
22
23 #include <linux/iio/iio.h>
24 #include <linux/iio/iio-opaque.h>
25 #include "iio_core.h"
26 #include "iio_core_trigger.h"
27 #include <linux/iio/sysfs.h>
28 #include <linux/iio/buffer.h>
29 #include <linux/iio/buffer_impl.h>
30
31 static const char * const iio_endian_prefix[] = {
32         [IIO_BE] = "be",
33         [IIO_LE] = "le",
34 };
35
36 static bool iio_buffer_is_active(struct iio_buffer *buf)
37 {
38         return !list_empty(&buf->buffer_list);
39 }
40
41 static size_t iio_buffer_data_available(struct iio_buffer *buf)
42 {
43         return buf->access->data_available(buf);
44 }
45
46 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
47                                    struct iio_buffer *buf, size_t required)
48 {
49         if (!indio_dev->info->hwfifo_flush_to_buffer)
50                 return -ENODEV;
51
52         return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
53 }
54
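/*
 * Check whether @buf holds at least @to_wait scan elements, triggering a
 * hardware FIFO flush when the in-memory buffer alone cannot satisfy the
 * request. Also returns true once the device has been unregistered so
 * that sleeping readers wake up and bail out.
 */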
55 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
56                              size_t to_wait, int to_flush)
57 {
58         size_t avail;
59         int flushed = 0;
60
61         /* wakeup if the device was unregistered */
62         if (!indio_dev->info)
63                 return true;
64
65         /* drain the buffer if it was disabled */
66         if (!iio_buffer_is_active(buf)) {
67                 to_wait = min_t(size_t, to_wait, 1);
68                 to_flush = 0;
69         }
70
71         avail = iio_buffer_data_available(buf);
72
73         if (avail >= to_wait) {
74                 /* force a flush for non-blocking reads */
75                 if (!to_wait && avail < to_flush)
76                         iio_buffer_flush_hwfifo(indio_dev, buf,
77                                                 to_flush - avail);
78                 return true;
79         }
80
81         if (to_flush)
82                 flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
83                                                   to_wait - avail);
84         if (flushed <= 0)
85                 return false;
86
87         if (avail + flushed >= to_wait)
88                 return true;
89
90         return false;
91 }
92
93 /**
94  * iio_buffer_read() - chrdev read for buffer access
95  * @filp:       File structure pointer for the char device
96  * @buf:        Destination buffer for iio buffer read
97  * @n:          Maximum number of bytes to read
98  * @f_ps:       Long offset provided by the user as a seek position
99  *
100  * This function relies on all buffer implementations having an
101  * iio_buffer as their first element.
102  *
103  * Return: number of bytes read on success, 0 to signal end of file,
104  *         or a negative error code on failure
105  **/
106 static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
107                                size_t n, loff_t *f_ps)
108 {
109         struct iio_dev_buffer_pair *ib = filp->private_data;
110         struct iio_buffer *rb = ib->buffer;
111         struct iio_dev *indio_dev = ib->indio_dev;
112         DEFINE_WAIT_FUNC(wait, woken_wake_function);
113         size_t datum_size;
114         size_t to_wait;
115         int ret = 0;
116
117         if (!indio_dev->info)
118                 return -ENODEV;
119
120         if (!rb || !rb->access->read)
121                 return -EINVAL;
122
123         if (rb->direction != IIO_BUFFER_DIRECTION_IN)
124                 return -EPERM;
125
126         datum_size = rb->bytes_per_datum;
127
128         /*
129          * If datum_size is 0 there will never be anything to read from the
130          * buffer, so signal end of file now.
131          */
132         if (!datum_size)
133                 return 0;
134
135         if (filp->f_flags & O_NONBLOCK)
136                 to_wait = 0;
137         else
138                 to_wait = min_t(size_t, n / datum_size, rb->watermark);
139
140         add_wait_queue(&rb->pollq, &wait);
141         do {
142                 if (!indio_dev->info) {
143                         ret = -ENODEV;
144                         break;
145                 }
146
147                 if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
148                         if (signal_pending(current)) {
149                                 ret = -ERESTARTSYS;
150                                 break;
151                         }
152
153                         wait_woken(&wait, TASK_INTERRUPTIBLE,
154                                    MAX_SCHEDULE_TIMEOUT);
155                         continue;
156                 }
157
158                 ret = rb->access->read(rb, n, buf);
159                 if (ret == 0 && (filp->f_flags & O_NONBLOCK))
160                         ret = -EAGAIN;
161         } while (ret == 0);
162         remove_wait_queue(&rb->pollq, &wait);
163
164         return ret;
165 }
166
167 static size_t iio_buffer_space_available(struct iio_buffer *buf)
168 {
169         if (buf->access->space_available)
170                 return buf->access->space_available(buf);
171
172         return SIZE_MAX;
173 }
174
175 static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
176                                 size_t n, loff_t *f_ps)
177 {
178         struct iio_dev_buffer_pair *ib = filp->private_data;
179         struct iio_buffer *rb = ib->buffer;
180         struct iio_dev *indio_dev = ib->indio_dev;
181         DEFINE_WAIT_FUNC(wait, woken_wake_function);
182         int ret = 0;
183         size_t written;
184
185         if (!indio_dev->info)
186                 return -ENODEV;
187
188         if (!rb || !rb->access->write)
189                 return -EINVAL;
190
191         if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
192                 return -EPERM;
193
194         written = 0;
195         add_wait_queue(&rb->pollq, &wait);
196         do {
                if (!indio_dev->info) {
                        ret = -ENODEV;
                        break;
                }
199
200                 if (!iio_buffer_space_available(rb)) {
201                         if (signal_pending(current)) {
202                                 ret = -ERESTARTSYS;
203                                 break;
204                         }
205
206                         if (filp->f_flags & O_NONBLOCK) {
207                                 if (!written)
208                                         ret = -EAGAIN;
209                                 break;
210                         }
211
212                         wait_woken(&wait, TASK_INTERRUPTIBLE,
213                                    MAX_SCHEDULE_TIMEOUT);
214                         continue;
215                 }
216
217                 ret = rb->access->write(rb, n - written, buf + written);
218                 if (ret < 0)
219                         break;
220
221                 written += ret;
222
223         } while (written != n);
224         remove_wait_queue(&rb->pollq, &wait);
225
226         return ret < 0 ? ret : written;
227 }
228
229 /**
230  * iio_buffer_poll() - poll the buffer to find out if it has data
231  * @filp:       File structure pointer for device access
232  * @wait:       Poll table structure pointer for which the driver adds
233  *              a wait queue
234  *
235  * Return: (EPOLLIN | EPOLLRDNORM) if data can be read, (EPOLLOUT |
236  *         EPOLLWRNORM) if space is available for writing, or 0 otherwise
237  */
238 static __poll_t iio_buffer_poll(struct file *filp,
239                                 struct poll_table_struct *wait)
240 {
241         struct iio_dev_buffer_pair *ib = filp->private_data;
242         struct iio_buffer *rb = ib->buffer;
243         struct iio_dev *indio_dev = ib->indio_dev;
244
245         if (!indio_dev->info || !rb)
246                 return 0;
247
248         poll_wait(filp, &rb->pollq, wait);
249
250         switch (rb->direction) {
251         case IIO_BUFFER_DIRECTION_IN:
252                 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
253                         return EPOLLIN | EPOLLRDNORM;
254                 break;
255         case IIO_BUFFER_DIRECTION_OUT:
256                 if (iio_buffer_space_available(rb))
257                         return EPOLLOUT | EPOLLWRNORM;
258                 break;
259         }
260
261         return 0;
262 }
263
264 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
265                                 size_t n, loff_t *f_ps)
266 {
267         struct iio_dev_buffer_pair *ib = filp->private_data;
268         struct iio_buffer *rb = ib->buffer;
269
270         /* check if buffer was opened through new API */
271         if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
272                 return -EBUSY;
273
274         return iio_buffer_read(filp, buf, n, f_ps);
275 }
276
277 ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
278                                  size_t n, loff_t *f_ps)
279 {
280         struct iio_dev_buffer_pair *ib = filp->private_data;
281         struct iio_buffer *rb = ib->buffer;
282
283         /* check if buffer was opened through new API */
284         if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
285                 return -EBUSY;
286
287         return iio_buffer_write(filp, buf, n, f_ps);
288 }
289
290 __poll_t iio_buffer_poll_wrapper(struct file *filp,
291                                  struct poll_table_struct *wait)
292 {
293         struct iio_dev_buffer_pair *ib = filp->private_data;
294         struct iio_buffer *rb = ib->buffer;
295
296         /* check if buffer was opened through new API */
297         if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
298                 return 0;
299
300         return iio_buffer_poll(filp, wait);
301 }
302
303 /**
304  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
305  * @indio_dev: The IIO device
306  *
307  * Wakes up the event waitqueue used for poll(). Should usually
308  * be called when the device is unregistered.
309  */
310 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
311 {
312         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
313         struct iio_buffer *buffer;
314         unsigned int i;
315
316         for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
317                 buffer = iio_dev_opaque->attached_buffers[i];
318                 wake_up(&buffer->pollq);
319         }
320 }
321
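/**
 * iio_pop_from_buffer() - remove a single scan element worth of data
 * @buffer: the buffer to pop from
 * @data: destination for the popped datum
 *
 * Note: the Return convention below assumes the buffer implementation's
 * remove_from() callback reports 0 on success.
 *
 * Return: 0 on success, -EINVAL if the buffer cannot be popped from, or
 *         another negative error code from the underlying implementation.
 */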
322 int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
323 {
324         if (!buffer || !buffer->access || !buffer->access->remove_from)
325                 return -EINVAL;
326
327         return buffer->access->remove_from(buffer, data);
328 }
329 EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
330
331 void iio_buffer_init(struct iio_buffer *buffer)
332 {
333         INIT_LIST_HEAD(&buffer->demux_list);
334         INIT_LIST_HEAD(&buffer->buffer_list);
335         init_waitqueue_head(&buffer->pollq);
336         kref_init(&buffer->ref);
337         if (!buffer->watermark)
338                 buffer->watermark = 1;
339 }
340 EXPORT_SYMBOL(iio_buffer_init);
341
342 void iio_device_detach_buffers(struct iio_dev *indio_dev)
343 {
344         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
345         struct iio_buffer *buffer;
346         unsigned int i;
347
348         for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
349                 buffer = iio_dev_opaque->attached_buffers[i];
350                 iio_buffer_put(buffer);
351         }
352
353         kfree(iio_dev_opaque->attached_buffers);
354 }
355
356 static ssize_t iio_show_scan_index(struct device *dev,
357                                    struct device_attribute *attr,
358                                    char *buf)
359 {
360         return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
361 }
362
363 static ssize_t iio_show_fixed_type(struct device *dev,
364                                    struct device_attribute *attr,
365                                    char *buf)
366 {
367         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
368         u8 type = this_attr->c->scan_type.endianness;
369
370         if (type == IIO_CPU) {
371 #ifdef __LITTLE_ENDIAN
372                 type = IIO_LE;
373 #else
374                 type = IIO_BE;
375 #endif
376         }
377         if (this_attr->c->scan_type.repeat > 1)
378                 return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
379                        iio_endian_prefix[type],
380                        this_attr->c->scan_type.sign,
381                        this_attr->c->scan_type.realbits,
382                        this_attr->c->scan_type.storagebits,
383                        this_attr->c->scan_type.repeat,
384                        this_attr->c->scan_type.shift);
385         else
386                 return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
387                        iio_endian_prefix[type],
388                        this_attr->c->scan_type.sign,
389                        this_attr->c->scan_type.realbits,
390                        this_attr->c->scan_type.storagebits,
391                        this_attr->c->scan_type.shift);
392 }
393
394 static ssize_t iio_scan_el_show(struct device *dev,
395                                 struct device_attribute *attr,
396                                 char *buf)
397 {
398         int ret;
399         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
400
401         /* Ensure ret is 0 or 1. */
402         ret = !!test_bit(to_iio_dev_attr(attr)->address,
403                        buffer->scan_mask);
404
405         return sysfs_emit(buf, "%d\n", ret);
406 }
407
408 /* NULL is used as the error indicator since it can never be a valid mask. */
409 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
410                                                 unsigned int masklength,
411                                                 const unsigned long *mask,
412                                                 bool strict)
413 {
414         if (bitmap_empty(mask, masklength))
415                 return NULL;
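        /*
         * available_scan_masks is expected to be terminated by an all-zero
         * entry; testing the first word of each candidate is what stops
         * this walk at that terminator.
         */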
416         while (*av_masks) {
417                 if (strict) {
418                         if (bitmap_equal(mask, av_masks, masklength))
419                                 return av_masks;
420                 } else {
421                         if (bitmap_subset(mask, av_masks, masklength))
422                                 return av_masks;
423                 }
424                 av_masks += BITS_TO_LONGS(masklength);
425         }
426         return NULL;
427 }
428
429 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
430                                    const unsigned long *mask)
431 {
432         if (!indio_dev->setup_ops->validate_scan_mask)
433                 return true;
434
435         return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
436 }
437
438 /**
439  * iio_scan_mask_set() - set particular bit in the scan mask
440  * @indio_dev: the iio device
441  * @buffer: the buffer whose scan mask we are interested in
442  * @bit: the bit to be set.
443  *
444  * Note that at this point we have no way of knowing what other
445  * buffers might request, hence this code only verifies that the
446  * individual buffer's request is plausible.
447  */
448 static int iio_scan_mask_set(struct iio_dev *indio_dev,
449                              struct iio_buffer *buffer, int bit)
450 {
451         const unsigned long *mask;
452         unsigned long *trialmask;
453
454         if (!indio_dev->masklength) {
455                 WARN(1, "Trying to set scanmask prior to registering buffer\n");
456                 return -EINVAL;
457         }
458
459         trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
460         if (!trialmask)
461                 return -ENOMEM;
462         bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
463         set_bit(bit, trialmask);
464
465         if (!iio_validate_scan_mask(indio_dev, trialmask))
466                 goto err_invalid_mask;
467
468         if (indio_dev->available_scan_masks) {
469                 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
470                                            indio_dev->masklength,
471                                            trialmask, false);
472                 if (!mask)
473                         goto err_invalid_mask;
474         }
475         bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
476
477         bitmap_free(trialmask);
478
479         return 0;
480
481 err_invalid_mask:
482         bitmap_free(trialmask);
483         return -EINVAL;
484 }
485
486 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
487 {
488         clear_bit(bit, buffer->scan_mask);
489         return 0;
490 }
491
492 static int iio_scan_mask_query(struct iio_dev *indio_dev,
493                                struct iio_buffer *buffer, int bit)
494 {
495         if (bit > indio_dev->masklength)
496                 return -EINVAL;
497
498         if (!buffer->scan_mask)
499                 return 0;
500
501         /* Ensure return value is 0 or 1. */
502         return !!test_bit(bit, buffer->scan_mask);
503 }
504
505 static ssize_t iio_scan_el_store(struct device *dev,
506                                  struct device_attribute *attr,
507                                  const char *buf,
508                                  size_t len)
509 {
510         int ret;
511         bool state;
512         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
513         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
514         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
515         struct iio_buffer *buffer = this_attr->buffer;
516
517         ret = kstrtobool(buf, &state);
518         if (ret < 0)
519                 return ret;
520         mutex_lock(&iio_dev_opaque->mlock);
521         if (iio_buffer_is_active(buffer)) {
522                 ret = -EBUSY;
523                 goto error_ret;
524         }
525         ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
526         if (ret < 0)
527                 goto error_ret;
528         if (!state && ret) {
529                 ret = iio_scan_mask_clear(buffer, this_attr->address);
530                 if (ret)
531                         goto error_ret;
532         } else if (state && !ret) {
533                 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
534                 if (ret)
535                         goto error_ret;
536         }
537
538 error_ret:
539         mutex_unlock(&iio_dev_opaque->mlock);
540
541         return ret < 0 ? ret : len;
542 }
543
544 static ssize_t iio_scan_el_ts_show(struct device *dev,
545                                    struct device_attribute *attr,
546                                    char *buf)
547 {
548         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
549
550         return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
551 }
552
553 static ssize_t iio_scan_el_ts_store(struct device *dev,
554                                     struct device_attribute *attr,
555                                     const char *buf,
556                                     size_t len)
557 {
558         int ret;
559         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
560         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
561         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
562         bool state;
563
564         ret = kstrtobool(buf, &state);
565         if (ret < 0)
566                 return ret;
567
568         mutex_lock(&iio_dev_opaque->mlock);
569         if (iio_buffer_is_active(buffer)) {
570                 ret = -EBUSY;
571                 goto error_ret;
572         }
573         buffer->scan_timestamp = state;
574 error_ret:
575         mutex_unlock(&iio_dev_opaque->mlock);
576
577         return ret ? ret : len;
578 }
579
580 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
581                                         struct iio_buffer *buffer,
582                                         const struct iio_chan_spec *chan)
583 {
584         int ret, attrcount = 0;
585
586         ret = __iio_add_chan_devattr("index",
587                                      chan,
588                                      &iio_show_scan_index,
589                                      NULL,
590                                      0,
591                                      IIO_SEPARATE,
592                                      &indio_dev->dev,
593                                      buffer,
594                                      &buffer->buffer_attr_list);
595         if (ret)
596                 return ret;
597         attrcount++;
598         ret = __iio_add_chan_devattr("type",
599                                      chan,
600                                      &iio_show_fixed_type,
601                                      NULL,
602                                      0,
603                                      0,
604                                      &indio_dev->dev,
605                                      buffer,
606                                      &buffer->buffer_attr_list);
607         if (ret)
608                 return ret;
609         attrcount++;
610         if (chan->type != IIO_TIMESTAMP)
611                 ret = __iio_add_chan_devattr("en",
612                                              chan,
613                                              &iio_scan_el_show,
614                                              &iio_scan_el_store,
615                                              chan->scan_index,
616                                              0,
617                                              &indio_dev->dev,
618                                              buffer,
619                                              &buffer->buffer_attr_list);
620         else
621                 ret = __iio_add_chan_devattr("en",
622                                              chan,
623                                              &iio_scan_el_ts_show,
624                                              &iio_scan_el_ts_store,
625                                              chan->scan_index,
626                                              0,
627                                              &indio_dev->dev,
628                                              buffer,
629                                              &buffer->buffer_attr_list);
630         if (ret)
631                 return ret;
632         attrcount++;
633         return attrcount;
635 }
636
637 static ssize_t length_show(struct device *dev, struct device_attribute *attr,
638                            char *buf)
639 {
640         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
641
642         return sysfs_emit(buf, "%d\n", buffer->length);
643 }
644
645 static ssize_t length_store(struct device *dev, struct device_attribute *attr,
646                             const char *buf, size_t len)
647 {
648         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
649         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
650         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
651         unsigned int val;
652         int ret;
653
654         ret = kstrtouint(buf, 10, &val);
655         if (ret)
656                 return ret;
657
658         if (val == buffer->length)
659                 return len;
660
661         mutex_lock(&iio_dev_opaque->mlock);
662         if (iio_buffer_is_active(buffer)) {
663                 ret = -EBUSY;
664         } else {
665                 buffer->access->set_length(buffer, val);
666                 ret = 0;
667         }
668         if (ret)
669                 goto out;
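        /* Keep the watermark no larger than the new buffer length. */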
670         if (buffer->length && buffer->length < buffer->watermark)
671                 buffer->watermark = buffer->length;
672 out:
673         mutex_unlock(&iio_dev_opaque->mlock);
674
675         return ret ? ret : len;
676 }
677
678 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
679                            char *buf)
680 {
681         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
682
683         return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
684 }
685
686 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
687                                              unsigned int scan_index)
688 {
689         const struct iio_chan_spec *ch;
690         unsigned int bytes;
691
692         ch = iio_find_channel_from_si(indio_dev, scan_index);
693         bytes = ch->scan_type.storagebits / 8;
694         if (ch->scan_type.repeat > 1)
695                 bytes *= ch->scan_type.repeat;
696         return bytes;
697 }
698
699 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
700 {
701         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
702
703         return iio_storage_bytes_for_si(indio_dev,
704                                         iio_dev_opaque->scan_index_timestamp);
705 }
706
707 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
708                                   const unsigned long *mask, bool timestamp)
709 {
710         unsigned int bytes = 0;
711         int length, i, largest = 0;
712
713         /* How much space will the demuxed element take? */
714         for_each_set_bit(i, mask,
715                          indio_dev->masklength) {
716                 length = iio_storage_bytes_for_si(indio_dev, i);
717                 bytes = ALIGN(bytes, length);
718                 bytes += length;
719                 largest = max(largest, length);
720         }
721
722         if (timestamp) {
723                 length = iio_storage_bytes_for_timestamp(indio_dev);
724                 bytes = ALIGN(bytes, length);
725                 bytes += length;
726                 largest = max(largest, length);
727         }
728
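        /*
         * Pad the scan so that a subsequent scan starts with its largest
         * element naturally aligned again.
         */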
729         bytes = ALIGN(bytes, largest);
730         return bytes;
731 }
732
733 static void iio_buffer_activate(struct iio_dev *indio_dev,
734                                 struct iio_buffer *buffer)
735 {
736         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
737
738         iio_buffer_get(buffer);
739         list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
740 }
741
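/*
 * Dropping a buffer from the active list also wakes the pollq so that
 * blocked readers and writers notice the buffer is no longer streaming.
 */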
742 static void iio_buffer_deactivate(struct iio_buffer *buffer)
743 {
744         list_del_init(&buffer->buffer_list);
745         wake_up_interruptible(&buffer->pollq);
746         iio_buffer_put(buffer);
747 }
748
749 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
750 {
751         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
752         struct iio_buffer *buffer, *_buffer;
753
754         list_for_each_entry_safe(buffer, _buffer,
755                                  &iio_dev_opaque->buffer_list, buffer_list)
756                 iio_buffer_deactivate(buffer);
757 }
758
759 static int iio_buffer_enable(struct iio_buffer *buffer,
760                              struct iio_dev *indio_dev)
761 {
762         if (!buffer->access->enable)
763                 return 0;
764         return buffer->access->enable(buffer, indio_dev);
765 }
766
767 static int iio_buffer_disable(struct iio_buffer *buffer,
768                               struct iio_dev *indio_dev)
769 {
770         if (!buffer->access->disable)
771                 return 0;
772         return buffer->access->disable(buffer, indio_dev);
773 }
774
775 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
776                                               struct iio_buffer *buffer)
777 {
778         unsigned int bytes;
779
780         if (!buffer->access->set_bytes_per_datum)
781                 return;
782
783         bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
784                                        buffer->scan_timestamp);
785
786         buffer->access->set_bytes_per_datum(buffer, bytes);
787 }
788
789 static int iio_buffer_request_update(struct iio_dev *indio_dev,
790                                      struct iio_buffer *buffer)
791 {
792         int ret;
793
794         iio_buffer_update_bytes_per_datum(indio_dev, buffer);
795         if (buffer->access->request_update) {
796                 ret = buffer->access->request_update(buffer);
797                 if (ret) {
798                         dev_dbg(&indio_dev->dev,
799                                 "Buffer not started: buffer parameter update failed (%d)\n",
800                                 ret);
801                         return ret;
802                 }
803         }
804
805         return 0;
806 }
807
808 static void iio_free_scan_mask(struct iio_dev *indio_dev,
809                                const unsigned long *mask)
810 {
811         /* If the mask is dynamically allocated free it, otherwise do nothing */
812         if (!indio_dev->available_scan_masks)
813                 bitmap_free(mask);
814 }
815
816 struct iio_device_config {
817         unsigned int mode;
818         unsigned int watermark;
819         const unsigned long *scan_mask;
820         unsigned int scan_bytes;
821         bool scan_timestamp;
822 };
823
824 static int iio_verify_update(struct iio_dev *indio_dev,
825                              struct iio_buffer *insert_buffer,
826                              struct iio_buffer *remove_buffer,
827                              struct iio_device_config *config)
828 {
829         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
830         unsigned long *compound_mask;
831         const unsigned long *scan_mask;
832         bool strict_scanmask = false;
833         struct iio_buffer *buffer;
834         bool scan_timestamp;
835         unsigned int modes;
836
837         if (insert_buffer &&
838             bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
839                 dev_dbg(&indio_dev->dev,
840                         "At least one scan element must be enabled first\n");
841                 return -EINVAL;
842         }
843
844         memset(config, 0, sizeof(*config));
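        /*
         * Start from the largest possible watermark; it is lowered to the
         * minimum across all buffers that stay enabled below.
         */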
845         config->watermark = ~0;
846
847         /*
848          * If there is just one buffer and we are removing it there is nothing
849          * to verify.
850          */
851         if (remove_buffer && !insert_buffer &&
852             list_is_singular(&iio_dev_opaque->buffer_list))
853                 return 0;
854
855         modes = indio_dev->modes;
856
857         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
858                 if (buffer == remove_buffer)
859                         continue;
860                 modes &= buffer->access->modes;
861                 config->watermark = min(config->watermark, buffer->watermark);
862         }
863
864         if (insert_buffer) {
865                 modes &= insert_buffer->access->modes;
866                 config->watermark = min(config->watermark,
867                                         insert_buffer->watermark);
868         }
869
870         /* Definitely possible for devices to support both of these. */
871         if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
872                 config->mode = INDIO_BUFFER_TRIGGERED;
873         } else if (modes & INDIO_BUFFER_HARDWARE) {
874                 /*
875                  * Keep things simple for now and only allow a single buffer to
876                  * be connected in hardware mode.
877                  */
878                 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
879                         return -EINVAL;
880                 config->mode = INDIO_BUFFER_HARDWARE;
881                 strict_scanmask = true;
882         } else if (modes & INDIO_BUFFER_SOFTWARE) {
883                 config->mode = INDIO_BUFFER_SOFTWARE;
884         } else {
885                 /* Can only occur on first buffer */
886                 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
887                         dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
888                 return -EINVAL;
889         }
890
891         /* What scan mask do we actually have? */
892         compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
893         if (!compound_mask)
894                 return -ENOMEM;
895
896         scan_timestamp = false;
897
898         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
899                 if (buffer == remove_buffer)
900                         continue;
901                 bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
902                           indio_dev->masklength);
903                 scan_timestamp |= buffer->scan_timestamp;
904         }
905
906         if (insert_buffer) {
907                 bitmap_or(compound_mask, compound_mask,
908                           insert_buffer->scan_mask, indio_dev->masklength);
909                 scan_timestamp |= insert_buffer->scan_timestamp;
910         }
911
912         if (indio_dev->available_scan_masks) {
913                 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
914                                                 indio_dev->masklength,
915                                                 compound_mask,
916                                                 strict_scanmask);
917                 bitmap_free(compound_mask);
918                 if (!scan_mask)
919                         return -EINVAL;
920         } else {
921                 scan_mask = compound_mask;
922         }
923
924         config->scan_bytes = iio_compute_scan_bytes(indio_dev,
925                                                     scan_mask, scan_timestamp);
926         config->scan_mask = scan_mask;
927         config->scan_timestamp = scan_timestamp;
928
929         return 0;
930 }
931
932 /**
933  * struct iio_demux_table - table describing demux memcpy ops
934  * @from:       index to copy from
935  * @to:         index to copy to
936  * @length:     how many bytes to copy
937  * @l:          list head used for management
938  */
939 struct iio_demux_table {
940         unsigned int from;
941         unsigned int to;
942         unsigned int length;
943         struct list_head l;
944 };
945
946 static void iio_buffer_demux_free(struct iio_buffer *buffer)
947 {
948         struct iio_demux_table *p, *q;
949
950         list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
951                 list_del(&p->l);
952                 kfree(p);
953         }
954 }
955
956 static int iio_buffer_add_demux(struct iio_buffer *buffer,
957                                 struct iio_demux_table **p, unsigned int in_loc,
958                                 unsigned int out_loc,
959                                 unsigned int length)
960 {
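        /*
         * Extend the previous demux region when both source and destination
         * remain contiguous; otherwise start a new table entry.
         */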
961         if (*p && (*p)->from + (*p)->length == in_loc &&
962             (*p)->to + (*p)->length == out_loc) {
963                 (*p)->length += length;
964         } else {
965                 *p = kmalloc(sizeof(**p), GFP_KERNEL);
966                 if (!(*p))
967                         return -ENOMEM;
968                 (*p)->from = in_loc;
969                 (*p)->to = out_loc;
970                 (*p)->length = length;
971                 list_add_tail(&(*p)->l, &buffer->demux_list);
972         }
973
974         return 0;
975 }
976
977 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
978                                    struct iio_buffer *buffer)
979 {
980         int ret, in_ind = -1, out_ind, length;
981         unsigned int in_loc = 0, out_loc = 0;
982         struct iio_demux_table *p = NULL;
983
984         /* Clear out any old demux */
985         iio_buffer_demux_free(buffer);
986         kfree(buffer->demux_bounce);
987         buffer->demux_bounce = NULL;
988
989         /* First work out which scan mode we will actually have */
990         if (bitmap_equal(indio_dev->active_scan_mask,
991                          buffer->scan_mask,
992                          indio_dev->masklength))
993                 return 0;
994
995         /* Now we have the two masks, work from least sig and build up sizes */
996         for_each_set_bit(out_ind,
997                          buffer->scan_mask,
998                          indio_dev->masklength) {
999                 in_ind = find_next_bit(indio_dev->active_scan_mask,
1000                                        indio_dev->masklength,
1001                                        in_ind + 1);
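                /*
                 * Skip source channels this buffer does not want, while
                 * accounting for the space they occupy in the incoming
                 * sample.
                 */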
1002                 while (in_ind != out_ind) {
1003                         length = iio_storage_bytes_for_si(indio_dev, in_ind);
1004                         /* Make sure we are aligned */
1005                         in_loc = roundup(in_loc, length) + length;
1006                         in_ind = find_next_bit(indio_dev->active_scan_mask,
1007                                                indio_dev->masklength,
1008                                                in_ind + 1);
1009                 }
1010                 length = iio_storage_bytes_for_si(indio_dev, in_ind);
1011                 out_loc = roundup(out_loc, length);
1012                 in_loc = roundup(in_loc, length);
1013                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1014                 if (ret)
1015                         goto error_clear_mux_table;
1016                 out_loc += length;
1017                 in_loc += length;
1018         }
1019         /* Relies on scan_timestamp being last */
1020         if (buffer->scan_timestamp) {
1021                 length = iio_storage_bytes_for_timestamp(indio_dev);
1022                 out_loc = roundup(out_loc, length);
1023                 in_loc = roundup(in_loc, length);
1024                 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1025                 if (ret)
1026                         goto error_clear_mux_table;
1027                 out_loc += length;
1028         }
1029         buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1030         if (!buffer->demux_bounce) {
1031                 ret = -ENOMEM;
1032                 goto error_clear_mux_table;
1033         }
1034         return 0;
1035
1036 error_clear_mux_table:
1037         iio_buffer_demux_free(buffer);
1038
1039         return ret;
1040 }
1041
1042 static int iio_update_demux(struct iio_dev *indio_dev)
1043 {
1044         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1045         struct iio_buffer *buffer;
1046         int ret;
1047
1048         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1049                 ret = iio_buffer_update_demux(indio_dev, buffer);
1050                 if (ret < 0)
1051                         goto error_clear_mux_table;
1052         }
1053         return 0;
1054
1055 error_clear_mux_table:
1056         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1057                 iio_buffer_demux_free(buffer);
1058
1059         return ret;
1060 }
1061
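/*
 * Bring the device up for buffered operation with the already verified
 * @config: run preenable, program the scan mode and hardware FIFO
 * watermark, enable each attached buffer, attach the trigger poll
 * function and finally run postenable. Errors unwind in reverse order.
 */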
1062 static int iio_enable_buffers(struct iio_dev *indio_dev,
1063                               struct iio_device_config *config)
1064 {
1065         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1066         struct iio_buffer *buffer, *tmp = NULL;
1067         int ret;
1068
1069         indio_dev->active_scan_mask = config->scan_mask;
1070         indio_dev->scan_timestamp = config->scan_timestamp;
1071         indio_dev->scan_bytes = config->scan_bytes;
1072         iio_dev_opaque->currentmode = config->mode;
1073
1074         iio_update_demux(indio_dev);
1075
1076         /* Wind up again */
1077         if (indio_dev->setup_ops->preenable) {
1078                 ret = indio_dev->setup_ops->preenable(indio_dev);
1079                 if (ret) {
1080                         dev_dbg(&indio_dev->dev,
1081                                 "Buffer not started: buffer preenable failed (%d)\n", ret);
1082                         goto err_undo_config;
1083                 }
1084         }
1085
1086         if (indio_dev->info->update_scan_mode) {
1087                 ret = indio_dev->info
1088                         ->update_scan_mode(indio_dev,
1089                                            indio_dev->active_scan_mask);
1090                 if (ret < 0) {
1091                         dev_dbg(&indio_dev->dev,
1092                                 "Buffer not started: update scan mode failed (%d)\n",
1093                                 ret);
1094                         goto err_run_postdisable;
1095                 }
1096         }
1097
1098         if (indio_dev->info->hwfifo_set_watermark)
1099                 indio_dev->info->hwfifo_set_watermark(indio_dev,
1100                         config->watermark);
1101
1102         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1103                 ret = iio_buffer_enable(buffer, indio_dev);
1104                 if (ret) {
1105                         tmp = buffer;
1106                         goto err_disable_buffers;
1107                 }
1108         }
1109
1110         if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1111                 ret = iio_trigger_attach_poll_func(indio_dev->trig,
1112                                                    indio_dev->pollfunc);
1113                 if (ret)
1114                         goto err_disable_buffers;
1115         }
1116
1117         if (indio_dev->setup_ops->postenable) {
1118                 ret = indio_dev->setup_ops->postenable(indio_dev);
1119                 if (ret) {
1120                         dev_dbg(&indio_dev->dev,
1121                                 "Buffer not started: postenable failed (%d)\n", ret);
1122                         goto err_detach_pollfunc;
1123                 }
1124         }
1125
1126         return 0;
1127
1128 err_detach_pollfunc:
1129         if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1130                 iio_trigger_detach_poll_func(indio_dev->trig,
1131                                              indio_dev->pollfunc);
1132         }
1133 err_disable_buffers:
1134         buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
1135         list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1136                                              buffer_list)
1137                 iio_buffer_disable(buffer, indio_dev);
1138 err_run_postdisable:
1139         if (indio_dev->setup_ops->postdisable)
1140                 indio_dev->setup_ops->postdisable(indio_dev);
1141 err_undo_config:
1142         iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1143         indio_dev->active_scan_mask = NULL;
1144
1145         return ret;
1146 }
1147
1148 static int iio_disable_buffers(struct iio_dev *indio_dev)
1149 {
1150         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1151         struct iio_buffer *buffer;
1152         int ret = 0;
1153         int ret2;
1154
1155         /* Wind down existing buffers - iff there are any */
1156         if (list_empty(&iio_dev_opaque->buffer_list))
1157                 return 0;
1158
1159         /*
1160          * If things go wrong at some step in disable we still need to continue
1161          * to perform the other steps, otherwise we leave the device in an
1162          * inconsistent state. We return the error code for the first error we
1163          * encountered.
1164          */
1165
1166         if (indio_dev->setup_ops->predisable) {
1167                 ret2 = indio_dev->setup_ops->predisable(indio_dev);
1168                 if (ret2 && !ret)
1169                         ret = ret2;
1170         }
1171
1172         if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1173                 iio_trigger_detach_poll_func(indio_dev->trig,
1174                                              indio_dev->pollfunc);
1175         }
1176
1177         list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1178                 ret2 = iio_buffer_disable(buffer, indio_dev);
1179                 if (ret2 && !ret)
1180                         ret = ret2;
1181         }
1182
1183         if (indio_dev->setup_ops->postdisable) {
1184                 ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1185                 if (ret2 && !ret)
1186                         ret = ret2;
1187         }
1188
1189         iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1190         indio_dev->active_scan_mask = NULL;
1191         iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1192
1193         return ret;
1194 }
1195
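/*
 * Core of buffer (de)activation: verify the combined configuration, wind
 * down the currently running buffers, apply the requested insertion and
 * removal, then re-enable whatever remains active. Callers hold the
 * device's mlock.
 */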
1196 static int __iio_update_buffers(struct iio_dev *indio_dev,
1197                                 struct iio_buffer *insert_buffer,
1198                                 struct iio_buffer *remove_buffer)
1199 {
1200         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1201         struct iio_device_config new_config;
1202         int ret;
1203
1204         ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1205                                 &new_config);
1206         if (ret)
1207                 return ret;
1208
1209         if (insert_buffer) {
1210                 ret = iio_buffer_request_update(indio_dev, insert_buffer);
1211                 if (ret)
1212                         goto err_free_config;
1213         }
1214
1215         ret = iio_disable_buffers(indio_dev);
1216         if (ret)
1217                 goto err_deactivate_all;
1218
1219         if (remove_buffer)
1220                 iio_buffer_deactivate(remove_buffer);
1221         if (insert_buffer)
1222                 iio_buffer_activate(indio_dev, insert_buffer);
1223
1224         /* If no buffers in list, we are done */
1225         if (list_empty(&iio_dev_opaque->buffer_list))
1226                 return 0;
1227
1228         ret = iio_enable_buffers(indio_dev, &new_config);
1229         if (ret)
1230                 goto err_deactivate_all;
1231
1232         return 0;
1233
1234 err_deactivate_all:
1235         /*
1236          * We've already verified that the config is valid earlier. If things go
1237          * wrong in either enable or disable the most likely reason is an IO
1238          * error from the device. In this case there is no good recovery
1239          * strategy. Just make sure to disable everything and leave the device
1240          * in a sane state.  With a bit of luck the device might come back to
1241          * life again later and userspace can try again.
1242          */
1243         iio_buffer_deactivate_all(indio_dev);
1244
1245 err_free_config:
1246         iio_free_scan_mask(indio_dev, new_config.scan_mask);
1247         return ret;
1248 }
1249
1250 int iio_update_buffers(struct iio_dev *indio_dev,
1251                        struct iio_buffer *insert_buffer,
1252                        struct iio_buffer *remove_buffer)
1253 {
1254         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1255         int ret;
1256
1257         if (insert_buffer == remove_buffer)
1258                 return 0;
1259
1260         if (insert_buffer &&
1261             insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
1262                 return -EINVAL;
1263
1264         mutex_lock(&iio_dev_opaque->info_exist_lock);
1265         mutex_lock(&iio_dev_opaque->mlock);
1266
1267         if (insert_buffer && iio_buffer_is_active(insert_buffer))
1268                 insert_buffer = NULL;
1269
1270         if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1271                 remove_buffer = NULL;
1272
1273         if (!insert_buffer && !remove_buffer) {
1274                 ret = 0;
1275                 goto out_unlock;
1276         }
1277
1278         if (!indio_dev->info) {
1279                 ret = -ENODEV;
1280                 goto out_unlock;
1281         }
1282
1283         ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1284
1285 out_unlock:
1286         mutex_unlock(&iio_dev_opaque->mlock);
1287         mutex_unlock(&iio_dev_opaque->info_exist_lock);
1288
1289         return ret;
1290 }
1291 EXPORT_SYMBOL_GPL(iio_update_buffers);
1292
1293 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1294 {
1295         iio_disable_buffers(indio_dev);
1296         iio_buffer_deactivate_all(indio_dev);
1297 }
1298
1299 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1300                             const char *buf, size_t len)
1301 {
1302         int ret;
1303         bool requested_state;
1304         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1305         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1306         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1307         bool inlist;
1308
1309         ret = kstrtobool(buf, &requested_state);
1310         if (ret < 0)
1311                 return ret;
1312
1313         mutex_lock(&iio_dev_opaque->mlock);
1314
1315         /* Find out if it is in the list */
1316         inlist = iio_buffer_is_active(buffer);
1317         /* Already in desired state */
1318         if (inlist == requested_state)
1319                 goto done;
1320
1321         if (requested_state)
1322                 ret = __iio_update_buffers(indio_dev, buffer, NULL);
1323         else
1324                 ret = __iio_update_buffers(indio_dev, NULL, buffer);
1325
1326 done:
1327         mutex_unlock(&iio_dev_opaque->mlock);
1328         return (ret < 0) ? ret : len;
1329 }
1330
1331 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
1332                               char *buf)
1333 {
1334         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1335
1336         return sysfs_emit(buf, "%u\n", buffer->watermark);
1337 }
1338
1339 static ssize_t watermark_store(struct device *dev,
1340                                struct device_attribute *attr,
1341                                const char *buf, size_t len)
1342 {
1343         struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1344         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1345         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1346         unsigned int val;
1347         int ret;
1348
1349         ret = kstrtouint(buf, 10, &val);
1350         if (ret)
1351                 return ret;
1352         if (!val)
1353                 return -EINVAL;
1354
1355         mutex_lock(&iio_dev_opaque->mlock);
1356
1357         if (val > buffer->length) {
1358                 ret = -EINVAL;
1359                 goto out;
1360         }
1361
1362         if (iio_buffer_is_active(buffer)) {
1363                 ret = -EBUSY;
1364                 goto out;
1365         }
1366
1367         buffer->watermark = val;
1368 out:
1369         mutex_unlock(&iio_dev_opaque->mlock);
1370
1371         return ret ? ret : len;
1372 }
1373
1374 static ssize_t data_available_show(struct device *dev,
1375                                    struct device_attribute *attr, char *buf)
1376 {
1377         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1378
1379         return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1380 }
1381
1382 static ssize_t direction_show(struct device *dev,
1383                               struct device_attribute *attr,
1384                               char *buf)
1385 {
1386         struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1387
1388         switch (buffer->direction) {
1389         case IIO_BUFFER_DIRECTION_IN:
1390                 return sysfs_emit(buf, "in\n");
1391         case IIO_BUFFER_DIRECTION_OUT:
1392                 return sysfs_emit(buf, "out\n");
1393         default:
1394                 return -EINVAL;
1395         }
1396 }
1397
1398 static DEVICE_ATTR_RW(length);
1399 static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
1400 static DEVICE_ATTR_RW(enable);
1401 static DEVICE_ATTR_RW(watermark);
1402 static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
1403 static DEVICE_ATTR_RO(data_available);
1404 static DEVICE_ATTR_RO(direction);
1405
1406 /*
1407  * When adding new attributes here, put them at the end, at least until
1408  * the code that handles the length/length_ro & watermark/watermark_ro
1409  * assignments gets cleaned up. Otherwise these can create some weird
1410  * duplicate attribute errors under some setups.
1411  */
1412 static struct attribute *iio_buffer_attrs[] = {
1413         &dev_attr_length.attr,
1414         &dev_attr_enable.attr,
1415         &dev_attr_watermark.attr,
1416         &dev_attr_data_available.attr,
1417         &dev_attr_direction.attr,
1418 };
1419
1420 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1421
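/*
 * Clone a plain device_attribute into an iio_dev_attr tied to @buffer so
 * that the show/store callbacks can recover which buffer they act on.
 */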
1422 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1423                                               struct attribute *attr)
1424 {
1425         struct device_attribute *dattr = to_dev_attr(attr);
1426         struct iio_dev_attr *iio_attr;
1427
1428         iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1429         if (!iio_attr)
1430                 return NULL;
1431
1432         iio_attr->buffer = buffer;
1433         memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1434         iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1435         if (!iio_attr->dev_attr.attr.name) {
1436                 kfree(iio_attr);
1437                 return NULL;
1438         }
1439
1440         sysfs_attr_init(&iio_attr->dev_attr.attr);
1441
1442         list_add(&iio_attr->l, &buffer->buffer_attr_list);
1443
1444         return &iio_attr->dev_attr.attr;
1445 }
1446
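/*
 * Mirror the given buffer's attributes under the legacy "buffer" and
 * "scan_elements" sysfs groups so that older userspace keeps working.
 */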
1447 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1448                                                    struct attribute **buffer_attrs,
1449                                                    int buffer_attrcount,
1450                                                    int scan_el_attrcount)
1451 {
1452         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1453         struct attribute_group *group;
1454         struct attribute **attrs;
1455         int ret;
1456
1457         attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1458         if (!attrs)
1459                 return -ENOMEM;
1460
1461         memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1462
1463         group = &iio_dev_opaque->legacy_buffer_group;
1464         group->attrs = attrs;
1465         group->name = "buffer";
1466
1467         ret = iio_device_register_sysfs_group(indio_dev, group);
1468         if (ret)
1469                 goto error_free_buffer_attrs;
1470
1471         attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1472         if (!attrs) {
1473                 ret = -ENOMEM;
1474                 goto error_free_buffer_attrs;
1475         }
1476
1477         memcpy(attrs, &buffer_attrs[buffer_attrcount],
1478                scan_el_attrcount * sizeof(*attrs));
1479
1480         group = &iio_dev_opaque->legacy_scan_el_group;
1481         group->attrs = attrs;
1482         group->name = "scan_elements";
1483
1484         ret = iio_device_register_sysfs_group(indio_dev, group);
1485         if (ret)
1486                 goto error_free_scan_el_attrs;
1487
1488         return 0;
1489
1490 error_free_scan_el_attrs:
1491         kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1492 error_free_buffer_attrs:
1493         kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1494
1495         return ret;
1496 }
1497
1498 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1499 {
1500         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1501
1502         kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1503         kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1504 }
1505
1506 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1507 {
1508         struct iio_dev_buffer_pair *ib = filep->private_data;
1509         struct iio_dev *indio_dev = ib->indio_dev;
1510         struct iio_buffer *buffer = ib->buffer;
1511
1512         wake_up(&buffer->pollq);
1513
1514         kfree(ib);
1515         clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1516         iio_device_put(indio_dev);
1517
1518         return 0;
1519 }
1520
1521 static const struct file_operations iio_buffer_chrdev_fileops = {
1522         .owner = THIS_MODULE,
1523         .llseek = noop_llseek,
1524         .read = iio_buffer_read,
1525         .write = iio_buffer_write,
1526         .poll = iio_buffer_poll,
1527         .release = iio_buffer_chrdev_release,
1528 };
1529
1530 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1531 {
1532         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1533         int __user *ival = (int __user *)arg;
1534         struct iio_dev_buffer_pair *ib;
1535         struct iio_buffer *buffer;
1536         int fd, idx, ret;
1537
1538         if (copy_from_user(&idx, ival, sizeof(idx)))
1539                 return -EFAULT;
1540
1541         if (idx >= iio_dev_opaque->attached_buffers_cnt)
1542                 return -ENODEV;
1543
1544         iio_device_get(indio_dev);
1545
1546         buffer = iio_dev_opaque->attached_buffers[idx];
1547
1548         if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1549                 ret = -EBUSY;
1550                 goto error_iio_dev_put;
1551         }
1552
1553         ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1554         if (!ib) {
1555                 ret = -ENOMEM;
1556                 goto error_clear_busy_bit;
1557         }
1558
1559         ib->indio_dev = indio_dev;
1560         ib->buffer = buffer;
1561
1562         fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1563                               ib, O_RDWR | O_CLOEXEC);
1564         if (fd < 0) {
1565                 ret = fd;
1566                 goto error_free_ib;
1567         }
1568
1569         if (copy_to_user(ival, &fd, sizeof(fd))) {
1570                 /*
1571                  * "Leak" the fd, as there's not much we can do about this
1572                  * anyway. 'fd' might have been closed already, as
1573                  * anon_inode_getfd() called fd_install() on it, which made
1574                  * it reachable by userland.
1575                  *
1576                  * Instead of allowing a malicious user to play tricks with
1577                  * us, rely on the process exit path to do any necessary
1578                  * cleanup, as in releasing the file, if still needed.
1579                  */
1580                 return -EFAULT;
1581         }
1582
1583         return 0;
1584
1585 error_free_ib:
1586         kfree(ib);
1587 error_clear_busy_bit:
1588         clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1589 error_iio_dev_put:
1590         iio_device_put(indio_dev);
1591         return ret;
1592 }
1593
1594 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1595                                     unsigned int cmd, unsigned long arg)
1596 {
1597         switch (cmd) {
1598         case IIO_BUFFER_GET_FD_IOCTL:
1599                 return iio_device_buffer_getfd(indio_dev, arg);
1600         default:
1601                 return IIO_IOCTL_UNHANDLED;
1602         }
1603 }
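/*
 * Hypothetical userspace sketch (not part of this file) of how the
 * IIO_BUFFER_GET_FD_IOCTL handled above is typically used: the caller passes
 * in a buffer index and, on success, receives an anonymous file descriptor
 * for that buffer back through the same int.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/iio/buffer.h>
 *
 *	int dev_fd = open("/dev/iio:device0", O_RDWR);
 *	int arg = 1;	// index into the device's attached buffers
 *
 *	if (dev_fd >= 0 && ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &arg) == 0) {
 *		int buf_fd = arg;	// overwritten with the new fd
 *
 *		// read() scan data from buf_fd, then close it when done
 *		close(buf_fd);
 *	}
 *	if (dev_fd >= 0)
 *		close(dev_fd);
 */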
1604
1605 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
1606                                              struct iio_dev *indio_dev,
1607                                              int index)
1608 {
1609         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1610         struct iio_dev_attr *p;
1611         const struct iio_dev_attr *id_attr;
1612         struct attribute **attr;
1613         int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
1614         const struct iio_chan_spec *channels;
1615
1616         buffer_attrcount = 0;
1617         if (buffer->attrs) {
1618                 while (buffer->attrs[buffer_attrcount])
1619                         buffer_attrcount++;
1620         }
1621         buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
1622
1623         scan_el_attrcount = 0;
1624         INIT_LIST_HEAD(&buffer->buffer_attr_list);
1625         channels = indio_dev->channels;
1626         if (channels) {
1627                 /* add scan-element attributes for each channel with a valid scan index */
1628                 for (i = 0; i < indio_dev->num_channels; i++) {
1629                         if (channels[i].scan_index < 0)
1630                                 continue;
1631
1632                         /* Verify that sample bits fit into storage */
1633                         if (channels[i].scan_type.storagebits <
1634                             channels[i].scan_type.realbits +
1635                             channels[i].scan_type.shift) {
1636                                 dev_err(&indio_dev->dev,
1637                                         "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
1638                                         i, channels[i].scan_type.storagebits,
1639                                         channels[i].scan_type.realbits,
1640                                         channels[i].scan_type.shift);
1641                                 ret = -EINVAL;
1642                                 goto error_cleanup_dynamic;
1643                         }
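                        /*
                         * Illustrative example of a scan_type that satisfies
                         * the check above: a signed 12-bit sample stored
                         * left-justified in 16 bits uses .realbits = 12,
                         * .shift = 4, .storagebits = 16, since 12 + 4 <= 16.
                         */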
1644
1645                         ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1646                                                            &channels[i]);
1647                         if (ret < 0)
1648                                 goto error_cleanup_dynamic;
1649                         scan_el_attrcount += ret;
1650                         if (channels[i].type == IIO_TIMESTAMP)
1651                                 iio_dev_opaque->scan_index_timestamp =
1652                                         channels[i].scan_index;
1653                 }
1654                 if (indio_dev->masklength && !buffer->scan_mask) {
1655                         buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1656                                                           GFP_KERNEL);
1657                         if (!buffer->scan_mask) {
1658                                 ret = -ENOMEM;
1659                                 goto error_cleanup_dynamic;
1660                         }
1661                 }
1662         }
1663
1664         attrn = buffer_attrcount + scan_el_attrcount;
1665         attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
1666         if (!attr) {
1667                 ret = -ENOMEM;
1668                 goto error_free_scan_mask;
1669         }
1670
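        /*
         * Note: the index-based replacements below depend on the ordering of
         * iio_buffer_attrs: attr[0] is 'length' and attr[2] is 'watermark'
         * (see the comment above that array).
         */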
1671         memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1672         if (!buffer->access->set_length)
1673                 attr[0] = &dev_attr_length_ro.attr;
1674
1675         if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1676                 attr[2] = &dev_attr_watermark_ro.attr;
1677
1678         if (buffer->attrs)
1679                 for (i = 0, id_attr = buffer->attrs[i];
1680                      (id_attr = buffer->attrs[i]); i++)
1681                         attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
1682                                 (struct attribute *)&id_attr->dev_attr.attr;
1683
1684         buffer->buffer_group.attrs = attr;
1685
1686         for (i = 0; i < buffer_attrcount; i++) {
1687                 struct attribute *wrapped;
1688
1689                 wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1690                 if (!wrapped) {
1691                         ret = -ENOMEM;
1692                         goto error_free_buffer_attrs;
1693                 }
1694                 attr[i] = wrapped;
1695         }
1696
1697         attrn = 0;
1698         list_for_each_entry(p, &buffer->buffer_attr_list, l)
1699                 attr[attrn++] = &p->dev_attr.attr;
1700
1701         buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1702         if (!buffer->buffer_group.name) {
1703                 ret = -ENOMEM;
1704                 goto error_free_buffer_attrs;
1705         }
1706
1707         ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1708         if (ret)
1709                 goto error_free_buffer_attr_group_name;
1710
1711         /* we only need to register the legacy groups for the first buffer */
1712         if (index > 0)
1713                 return 0;
1714
1715         ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1716                                                       buffer_attrcount,
1717                                                       scan_el_attrcount);
1718         if (ret)
1719                 goto error_free_buffer_attr_group_name;
1720
1721         return 0;
1722
1723 error_free_buffer_attr_group_name:
1724         kfree(buffer->buffer_group.name);
1725 error_free_buffer_attrs:
1726         kfree(buffer->buffer_group.attrs);
1727 error_free_scan_mask:
1728         bitmap_free(buffer->scan_mask);
1729 error_cleanup_dynamic:
1730         iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1731
1732         return ret;
1733 }
1734
1735 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
1736                                              struct iio_dev *indio_dev,
1737                                              int index)
1738 {
1739         if (index == 0)
1740                 iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1741         bitmap_free(buffer->scan_mask);
1742         kfree(buffer->buffer_group.name);
1743         kfree(buffer->buffer_group.attrs);
1744         iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1745 }
1746
1747 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1748 {
1749         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1750         const struct iio_chan_spec *channels;
1751         struct iio_buffer *buffer;
1752         int ret, i, idx;
1753         size_t sz;
1754
1755         channels = indio_dev->channels;
1756         if (channels) {
1757                 int ml = indio_dev->masklength;
1758
1759                 for (i = 0; i < indio_dev->num_channels; i++)
1760                         ml = max(ml, channels[i].scan_index + 1);
1761                 indio_dev->masklength = ml;
1762         }
1763
1764         if (!iio_dev_opaque->attached_buffers_cnt)
1765                 return 0;
1766
1767         for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
1768                 buffer = iio_dev_opaque->attached_buffers[idx];
1769                 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
1770                 if (ret)
1771                         goto error_unwind_sysfs_and_mask;
1772         }
1773
1774         sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
1775         iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1776         if (!iio_dev_opaque->buffer_ioctl_handler) {
1777                 ret = -ENOMEM;
1778                 goto error_unwind_sysfs_and_mask;
1779         }
1780
1781         iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1782         iio_device_ioctl_handler_register(indio_dev,
1783                                           iio_dev_opaque->buffer_ioctl_handler);
1784
1785         return 0;
1786
1787 error_unwind_sysfs_and_mask:
1788         while (idx--) {
1789                 buffer = iio_dev_opaque->attached_buffers[idx];
1790                 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
1791         }
1792         return ret;
1793 }
1794
1795 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
1796 {
1797         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1798         struct iio_buffer *buffer;
1799         int i;
1800
1801         if (!iio_dev_opaque->attached_buffers_cnt)
1802                 return;
1803
1804         iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1805         kfree(iio_dev_opaque->buffer_ioctl_handler);
1806
1807         for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1808                 buffer = iio_dev_opaque->attached_buffers[i];
1809                 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
1810         }
1811 }
1812
1813 /**
1814  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1815  * @indio_dev: the iio device
1816  * @mask: scan mask to be checked
1817  *
1818  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1819  * can be used for devices where only one channel can be active for sampling at
1820  * a time.
1821  */
1822 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1823                                    const unsigned long *mask)
1824 {
1825         return bitmap_weight(mask, indio_dev->masklength) == 1;
1826 }
1827 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
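/*
 * A minimal sketch (foo_* names are hypothetical) of how a driver typically
 * wires this helper up, through the validate_scan_mask callback of its buffer
 * setup ops, for hardware that can only sample one channel at a time:
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *		.postenable = foo_buffer_postenable,
 *		.predisable = foo_buffer_predisable,
 *	};
 */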
1828
1829 static const void *iio_demux(struct iio_buffer *buffer,
1830                              const void *datain)
1831 {
1832         struct iio_demux_table *t;
1833
1834         if (list_empty(&buffer->demux_list))
1835                 return datain;
1836         list_for_each_entry(t, &buffer->demux_list, l)
1837                 memcpy(buffer->demux_bounce + t->to,
1838                        datain + t->from, t->length);
1839
1840         return buffer->demux_bounce;
1841 }
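/*
 * For illustration: if a device scans three 16-bit channels but a client
 * buffer only enabled the last one, the demux table built earlier in this
 * file would contain roughly one entry with .from = 4, .to = 0 and
 * .length = 2, so only the requested channel ends up in demux_bounce.
 */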
1842
1843 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1844 {
1845         const void *dataout = iio_demux(buffer, data);
1846         int ret;
1847
1848         ret = buffer->access->store_to(buffer, dataout);
1849         if (ret)
1850                 return ret;
1851
1852         /*
1853          * We can't just test for watermark to decide if we wake the poll queue
1854          * because a read may request fewer samples than the watermark.
1855          */
1856         wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1857         return 0;
1858 }
1859
1860 /**
1861  * iio_push_to_buffers() - push to a registered buffer.
1862  * @indio_dev:          iio_dev structure for device.
1863  * @data:               Full scan.
1864  */
1865 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1866 {
1867         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1868         int ret;
1869         struct iio_buffer *buf;
1870
1871         list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1872                 ret = iio_push_to_buffer(buf, data);
1873                 if (ret < 0)
1874                         return ret;
1875         }
1876
1877         return 0;
1878 }
1879 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
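/*
 * A minimal sketch of the usual producer side (foo_* names and the layout of
 * struct foo_state are hypothetical; st->scan is assumed to be sized and
 * 8-byte aligned to hold the full scan including the trailing timestamp):
 * a triggered-buffer driver fills its scan in the pollfunc and pushes it to
 * all attached buffers, typically via the iio_push_to_buffers_with_timestamp()
 * wrapper around the function above.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		// fill st->scan from the hardware FIFO/registers here
 *		iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
 *						   iio_get_time_ns(indio_dev));
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */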
1880
1881 /**
1882  * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1883  *    no alignment or space requirements.
1884  * @indio_dev:          iio_dev structure for device.
1885  * @data:               channel data excluding the timestamp.
1886  * @data_sz:            size of data.
1887  * @timestamp:          timestamp for the sample data.
1888  *
1889  * This special variant of iio_push_to_buffers_with_timestamp() does
1890  * not require space for the timestamp or 8-byte alignment of the data.
1891  * It does, however, require an allocation on the first call and additional
1892  * copies on every call, so it should be avoided where possible.
1893  */
1894 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
1895                                           const void *data,
1896                                           size_t data_sz,
1897                                           int64_t timestamp)
1898 {
1899         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1900
1901         /*
1902          * Conservative estimate - we can always safely copy the smaller of
1903          * the data provided and the length of the destination buffer. This
1904          * relaxed limit allows calling drivers to be lax about tracking the
1905          * size of the data they are pushing, at the cost of unnecessarily
1906          * copying padding.
1907          */
1908         data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
1909         if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
1910                 void *bb;
1911
1912                 bb = devm_krealloc(&indio_dev->dev,
1913                                    iio_dev_opaque->bounce_buffer,
1914                                    indio_dev->scan_bytes, GFP_KERNEL);
1915                 if (!bb)
1916                         return -ENOMEM;
1917                 iio_dev_opaque->bounce_buffer = bb;
1918                 iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
1919         }
1920         memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
1921         return iio_push_to_buffers_with_timestamp(indio_dev,
1922                                                   iio_dev_opaque->bounce_buffer,
1923                                                   timestamp);
1924 }
1925 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
1926
1927 /**
1928  * iio_buffer_release() - Free a buffer's resources
1929  * @ref: Pointer to the kref embedded in the iio_buffer struct
1930  *
1931  * This function is called when the last reference to the buffer has been
1932  * dropped. It will typically free all resources allocated by the buffer. Do not
1933  * call this function directly; always use iio_buffer_put() when done using a
1934  * buffer.
1935  */
1936 static void iio_buffer_release(struct kref *ref)
1937 {
1938         struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1939
1940         buffer->access->release(buffer);
1941 }
1942
1943 /**
1944  * iio_buffer_get() - Grab a reference to the buffer
1945  * @buffer: The buffer to grab a reference for, may be NULL
1946  *
1947  * Returns the pointer to the buffer that was passed into the function.
1948  */
1949 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1950 {
1951         if (buffer)
1952                 kref_get(&buffer->ref);
1953
1954         return buffer;
1955 }
1956 EXPORT_SYMBOL_GPL(iio_buffer_get);
1957
1958 /**
1959  * iio_buffer_put() - Release the reference to the buffer
1960  * @buffer: The buffer to release the reference for, may be NULL
1961  */
1962 void iio_buffer_put(struct iio_buffer *buffer)
1963 {
1964         if (buffer)
1965                 kref_put(&buffer->ref, iio_buffer_release);
1966 }
1967 EXPORT_SYMBOL_GPL(iio_buffer_put);
1968
1969 /**
1970  * iio_device_attach_buffer - Attach a buffer to an IIO device
1971  * @indio_dev: The device the buffer should be attached to
1972  * @buffer: The buffer to attach to the device
1973  *
1974  * Return: 0 on success, a negative error code otherwise.
1975  *
1976  * This function attaches a buffer to an IIO device. The buffer stays attached to
1977  * the device until the device is freed. For legacy reasons, the first attached
1978  * buffer will also be assigned to 'indio_dev->buffer'.
1979  * The array allocated here will be freed via the iio_device_detach_buffers()
1980  * call, which is handled by iio_device_free().
1981  */
1982 int iio_device_attach_buffer(struct iio_dev *indio_dev,
1983                              struct iio_buffer *buffer)
1984 {
1985         struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1986         struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
1987         unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
1988
1989         cnt++;
1990
1991         new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
1992         if (!new)
1993                 return -ENOMEM;
1994         iio_dev_opaque->attached_buffers = new;
1995
1996         buffer = iio_buffer_get(buffer);
1997
1998         /* first buffer is legacy; attach it to the IIO device directly */
1999         if (!indio_dev->buffer)
2000                 indio_dev->buffer = buffer;
2001
2002         iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
2003         iio_dev_opaque->attached_buffers_cnt = cnt;
2004
2005         return 0;
2006 }
2007 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
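/*
 * A minimal sketch of direct usage (error handling is abbreviated and the
 * context is hypothetical; most drivers instead rely on helpers such as
 * iio_triggered_buffer_setup(), which call this function internally).
 * Assumes <linux/iio/kfifo_buf.h> for iio_kfifo_allocate()/iio_kfifo_free():
 *
 *	struct iio_buffer *buffer;
 *	int ret;
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *	if (ret) {
 *		iio_kfifo_free(buffer);
 *		return ret;
 *	}
 *
 * The device takes its own reference above; a non-devm driver remains
 * responsible for dropping its allocation reference with iio_kfifo_free()
 * once the device has been unregistered.
 */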