// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than what can be
 * sustained with the one sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application via two
 * queues: the incoming queue and the outgoing queue. Blocks on the incoming
 * queue are waiting for the DMA controller to pick them up and fill them with
 * data. Blocks on the outgoing queue have been filled with data and are
 * waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment, but
 *    once the current owner is done processing it, the block will be freed
 *    instead of going to either the incoming or the outgoing queue.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to contain the
 * actual number of bytes in the buffer. Typically this will be equal to the
 * size of the block, but if the DMA hardware has certain alignment
 * requirements for the transfer length it might choose to use less than the
 * full size. In either case it is expected that bytes_used is a multiple of
 * the bytes per datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually perform
 * a DMA transfer for the block, e.g. because the buffer was disabled before
 * the block transfer was started. In this case it should set bytes_used to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may override some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to override a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */

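/*
 * The code below is an illustrative sketch only, not part of this module: it
 * shows roughly what the submit() side of a driver built on the generic
 * dmaengine framework might look like (see industrialio-buffer-dmaengine.c
 * for a real implementation). All example_* names and the wrapper structure
 * are hypothetical, and <linux/dmaengine.h> is assumed to be included.
 */
struct example_dma_buffer {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;
};

static void example_block_done(void *data);

static int example_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct example_dma_buffer *ebuf = container_of(queue,
		struct example_dma_buffer, queue);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Use the full block. Hardware with alignment requirements for the
	 * transfer length would round down here, which is why bytes_used may
	 * end up smaller than size.
	 */
	block->bytes_used = block->size;

	desc = dmaengine_prep_slave_single(ebuf->chan, block->phys_addr,
		block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Have the dmaengine completion callback report the block as done */
	desc->callback = example_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	/* Track the block so that abort() can hand it back later */
	spin_lock_irq(&queue->list_lock);
	list_add_tail(&block->head, &ebuf->active);
	spin_unlock_irq(&queue->list_lock);

	dma_async_issue_pending(ebuf->chan);

	return 0;
}
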
static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
			  block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DONE;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	if (block->state != IIO_BLOCK_STATE_DEAD)
		block->state = IIO_BLOCK_STATE_DONE;
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

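/*
 * Illustrative sketch, continued from above: the dmaengine completion
 * callback is where the hypothetical driver hands ownership of the block
 * back to the core. iio_dma_buffer_block_done() is safe to call from this
 * atomic context.
 */
static void example_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	/* The block is no longer in flight, take it off the active list */
	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);

	/* bytes_used was set at submit time and is left untouched here */
	iio_dma_buffer_block_done(block);
}
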
/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

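/*
 * Illustrative sketch, continued: the recommended abort() callback stops the
 * hardware and then uses iio_dma_buffer_block_list_abort() to hand back all
 * blocks still in flight on the hypothetical active list.
 */
static void example_abort(struct iio_dma_buffer_queue *queue)
{
	struct example_dma_buffer *ebuf = container_of(queue,
		struct example_dma_buffer, queue);

	/* No completion callbacks will run after this returns */
	dmaengine_terminate_sync(ebuf->chan);

	/* Sets bytes_used to 0 and returns the blocks to the queue */
	iio_dma_buffer_block_list_abort(queue, &ebuf->active);
}

static const struct iio_dma_buffer_ops example_dma_buffer_ops = {
	.submit = example_submit_block,
	.abort = example_abort,
};
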
static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application. E.g. a buffer of 1024
	 * samples at 4 bytes per datum results in two 2048 byte blocks.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
}

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will submit all blocks on the incoming queue to the DMA controller
 * and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;
	unsigned int idx;

	spin_lock_irq(&queue->list_lock);

	idx = queue->fileio.next_dequeue;
	block = queue->fileio.blocks[idx];

	if (block->state == IIO_BLOCK_STATE_DONE) {
		idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
		queue->fileio.next_dequeue = idx;
	} else {
		block = NULL;
	}

	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;
	unsigned int i;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		if (block != queue->fileio.active_block &&
		    block->state == IIO_BLOCK_STATE_DONE)
			data_available += block->size;
	}

	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

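/*
 * Illustrative sketch, continued: with the generic callbacks above, the
 * access functions of the hypothetical driver are mostly the defaults
 * provided by this module, as recommended in the comment at the top of this
 * file. The .read field name and the flags assume a recent struct
 * iio_buffer_access_funcs; example_buffer_release() is sketched near the end
 * of this file.
 */
static void example_buffer_release(struct iio_buffer *buf);

static const struct iio_buffer_access_funcs example_buffer_access_funcs = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = example_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};
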
/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

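/*
 * Illustrative sketch, continued: allocating and initializing the queue. The
 * DMA device passed to iio_dma_buffer_init() is the device that performs the
 * DMA, here the dmaengine channel's device. The "rx" channel name is purely
 * an example.
 */
static struct iio_buffer *example_buffer_alloc(struct device *dev)
{
	struct example_dma_buffer *ebuf;
	struct dma_chan *chan;

	ebuf = kzalloc(sizeof(*ebuf), GFP_KERNEL);
	if (!ebuf)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		kfree(ebuf);
		return ERR_CAST(chan);
	}

	ebuf->chan = chan;
	INIT_LIST_HEAD(&ebuf->active);

	iio_dma_buffer_init(&ebuf->queue, chan->device->dev,
		&example_dma_buffer_ops);
	ebuf->queue.buffer.access = &example_buffer_access_funcs;

	return &ebuf->queue.buffer;
}
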
/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	mutex_lock(&queue->lock);

	iio_dma_buffer_fileio_free(queue);
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

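/*
 * Illustrative sketch, concluded: teardown happens in two steps. On device
 * removal the hypothetical driver calls iio_dma_buffer_exit(), after which
 * its callbacks will no longer be invoked; the remaining resources are freed
 * from the buffer's release callback once the last buffer reference is
 * dropped.
 */
static void example_buffer_release(struct iio_buffer *buf)
{
	struct example_dma_buffer *ebuf = container_of(
		iio_buffer_to_queue(buf), struct example_dma_buffer, queue);

	iio_dma_buffer_release(&ebuf->queue);
	kfree(ebuf);
}

static void example_buffer_free(struct iio_buffer *buffer)
{
	struct example_dma_buffer *ebuf = container_of(
		iio_buffer_to_queue(buffer), struct example_dma_buffer, queue);

	iio_dma_buffer_exit(&ebuf->queue);
	dma_release_channel(ebuf->chan);

	/* The release callback above runs when the last reference is gone */
	iio_buffer_put(buffer);
}
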
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");