// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright (C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        struct dma_buf *dmabuf;
        char name[DMA_BUF_NAME_LEN];
        size_t ret = 0;

        dmabuf = dentry->d_fsdata;
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
        spin_unlock(&dmabuf->name_lock);

        return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
                             dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
        struct dma_buf *dmabuf;

        dmabuf = dentry->d_fsdata;
        if (unlikely(!dmabuf))
                return;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing dma-buf. This is the responsibility of each
         * driver that uses the reservation objects.
         *
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operations on the buffer.
         */
        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

        dmabuf->ops->release(dmabuf);

        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                dma_resv_fini(dmabuf->resv);

        WARN_ON(!list_empty(&dmabuf->attachments));
        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
        .d_dname = dmabuffs_dname,
        .d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx;

        ctx = init_pseudo(fc, DMA_BUF_MAGIC);
        if (!ctx)
                return -ENOMEM;
        ctx->dops = &dma_buf_dentry_ops;
        return 0;
}

static struct file_system_type dma_buf_fs_type = {
        .name = "dmabuf",
        .init_fs_context = dma_buf_fs_init_context,
        .kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check if buffer supports mmap */
        if (!dmabuf->ops->mmap)
                return -EINVAL;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /* only support discovering the end of the buffer,
         * but also allow SEEK_SET to maintain the idiomatic
         * SEEK_END(0), SEEK_CUR(0) pattern.
         */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}

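/*
 * A minimal userspace sketch (an illustration, not part of this file; "fd" is
 * a hypothetical dma-buf file descriptor) of the SEEK_END(0)/SEEK_SET(0)
 * idiom the llseek handler above supports for discovering the buffer size:
 *
 *     off_t size = lseek(fd, 0, SEEK_END);   // returns dmabuf->size
 *     lseek(fd, 0, SEEK_SET);                // rewind; only offset 0 works
 */
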
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

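/*
 * A minimal userspace sketch (an assumption, not part of this file;
 * "dmabuf_fd" is a hypothetical descriptor obtained e.g. via dma_buf_fd())
 * of querying the implicitly tracked fences described above:
 *
 *     #include <poll.h>
 *
 *     struct pollfd pfd = {
 *             .fd = dmabuf_fd,
 *             .events = POLLIN,       // wait for the exclusive (write) fence
 *     };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // the most recent write is complete; cache maintenance may
 *             // still be needed before the CPU reads the buffer
 *     }
 */
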
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence_excl;
        __poll_t events;
        unsigned shared_count, seq;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return EPOLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
        if (!events)
                return 0;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        if (fobj)
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
        fence_excl = rcu_dereference(resv->fence_excl);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
        }

        if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                __poll_t pevents = EPOLLIN;

                if (shared_count == 0)
                        pevents |= EPOLLOUT;

                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                        dcb->active |= pevents;
                        events &= ~pevents;
                } else
                        dcb->active = pevents;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & pevents) {
                        if (!dma_fence_get_rcu(fence_excl)) {
                                /* force a recheck */
                                events &= ~pevents;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
                                                           dma_buf_poll_cb)) {
                                events &= ~pevents;
                                dma_fence_put(fence_excl);
                        } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                dma_fence_put(fence_excl);
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        }
                }
        }

        if ((events & EPOLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;

                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~EPOLLOUT;
                else
                        dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (!(events & EPOLLOUT))
                        goto out;

                for (i = 0; i < shared_count; ++i) {
                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

                        if (!dma_fence_get_rcu(fence)) {
                                /*
                                 * fence refcount dropped to zero, this means
                                 * that fobj has been freed
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
                                events &= ~EPOLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!dma_fence_add_callback(fence, &dcb->cb,
                                                    dma_buf_poll_cb)) {
                                dma_fence_put(fence);
                                events &= ~EPOLLOUT;
                                break;
                        }
                        dma_fence_put(fence);
                }

                /* No callback queued, wake up any additional waiters. */
                if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }

out:
        rcu_read_unlock();
        return events;
}

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, returns -EBUSY.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
        char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
        long ret = 0;

        if (IS_ERR(name))
                return PTR_ERR(name);

        mutex_lock(&dmabuf->lock);
        if (!list_empty(&dmabuf->attachments)) {
                ret = -EBUSY;
                kfree(name);
                goto out_unlock;
        }
        spin_lock(&dmabuf->name_lock);
        kfree(dmabuf->name);
        dmabuf->name = name;
        spin_unlock(&dmabuf->name_lock);

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ret;
}

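/*
 * A minimal userspace sketch (an assumption, not part of this file; "fd" is a
 * hypothetical dma-buf file descriptor, and the ioctl name depends on the uapi
 * header version) of naming a buffer before any device is attached; once a
 * device is attached the call fails with EBUSY:
 *
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     if (ioctl(fd, DMA_BUF_SET_NAME, "camera-preview") < 0)
 *             ;  // e.g. EBUSY if already attached
 */
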
static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;

        case DMA_BUF_SET_NAME_A:
        case DMA_BUF_SET_NAME_B:
                return dma_buf_set_name(dmabuf, (const char __user *)arg);

        default:
                return -ENOTTY;
        }
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct dma_buf *dmabuf = file->private_data;

        seq_printf(m, "size:\t%zu\n", dmabuf->size);
        /* Don't count the temporary reference taken inside procfs seq_show */
        seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
        seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
        spin_lock(&dmabuf->name_lock);
        if (dmabuf->name)
                seq_printf(m, "name:\t%s\n", dmabuf->name);
        spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_file_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = dma_buf_ioctl,
#endif
        .show_fdinfo    = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
        struct file *file;
        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

        if (IS_ERR(inode))
                return ERR_CAST(inode);

        inode->i_size = dmabuf->size;
        inode_set_bytes(inode, dmabuf->size);

        file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
                                 flags, &dma_buf_fops);
        if (IS_ERR(file))
                goto err_alloc_file;
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = dmabuf;
        file->f_path.dentry->d_fsdata = dmabuf;

        return file;

err_alloc_file:
        iput(inode);
        return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A sketch of the importer-side sequence follows below.
 */

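/*
 * A condensed, hypothetical importer-side sketch of steps 2-4 above ("fd" and
 * "dev" are assumed to come from the surrounding driver; error unwinding is
 * abbreviated):
 *
 *     struct dma_buf *dmabuf;
 *     struct dma_buf_attachment *attach;
 *     struct sg_table *sgt;
 *
 *     dmabuf = dma_buf_get(fd);                       // fd -> dma_buf
 *     if (IS_ERR(dmabuf))
 *             return PTR_ERR(dmabuf);
 *
 *     attach = dma_buf_attach(dmabuf, dev);           // step 2
 *     if (IS_ERR(attach)) {
 *             dma_buf_put(dmabuf);
 *             return PTR_ERR(attach);
 *     }
 *
 *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // step 3
 *     // ... program the device DMA using sgt ...
 *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *
 *     dma_buf_detach(dmabuf, attach);                 // step 4
 *     dma_buf_put(dmabuf);
 */
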
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator-specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. If the ops are
 * missing, or if allocating the struct dma_buf fails, a negative error
 * is returned, wrapped in a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct dma_resv *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;

        if (!exp_info->resv)
                alloc_size += sizeof(struct dma_resv);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                    || !exp_info->ops
                    || !exp_info->ops->map_dma_buf
                    || !exp_info->ops->unmap_dma_buf
                    || !exp_info->ops->release)) {
                return ERR_PTR(-EINVAL);
        }

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                ret = -ENOMEM;
                goto err_module;
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        spin_lock_init(&dmabuf->name_lock);
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

        if (!resv) {
                resv = (struct dma_resv *)&dmabuf[1];
                dma_resv_init(resv);
        }
        dmabuf->resv = resv;

        file = dma_buf_getfile(dmabuf, exp_info->flags);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto err_dmabuf;
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;

err_dmabuf:
        kfree(dmabuf);
err_module:
        module_put(exp_info->owner);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

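/*
 * A hypothetical exporter-side sketch of dma_buf_export() usage ("my_buf" and
 * "my_dmabuf_ops" are assumed driver objects, not defined in this file):
 *
 *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *     struct dma_buf *dmabuf;
 *     int fd;
 *
 *     exp_info.ops = &my_dmabuf_ops;          // must provide map/unmap/release
 *     exp_info.size = my_buf->size;
 *     exp_info.flags = O_RDWR | O_CLOEXEC;
 *     exp_info.priv = my_buf;
 *
 *     dmabuf = dma_buf_export(&exp_info);
 *     if (IS_ERR(dmabuf))
 *             return PTR_ERR(dmabuf);
 *
 *     fd = dma_buf_fd(dmabuf, O_CLOEXEC);     // hand the buffer to userspace
 *     if (fd < 0) {
 *             dma_buf_put(dmabuf);
 *             return fd;
 *     }
 */
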
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);
        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);

        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        if (attach->sgt)
                dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table;

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        if (attach->sgt) {
                /*
                 * Two mappings with different directions for the same
                 * attachment are not allowed.
                 */
                if (attach->dir != direction &&
                    attach->dir != DMA_BIDIRECTIONAL)
                        return ERR_PTR(-EBUSY);

                return attach->sgt;
        }

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);

        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
                attach->sgt = sg_table;
                attach->dir = direction;
        }

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                              struct sg_table *sg_table,
                              enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        if (attach->sgt == sg_table)
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   To support dma_buf objects residing in highmem CPU access is page-based
 *   using an API similar to kmap. Accessing a dma_buf is done in aligned chunks
 *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
 *   returns a pointer in kernel virtual address space. Afterwards the chunk
 *   needs to be unmapped again. There is no limit on how often a given chunk
 *   can be mapped and unmapped, i.e. the importer does not need to call
 *   begin_cpu_access again before mapping the same chunk again.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   Implementing the functions is optional for exporters and for importers all
 *   the restrictions of using kmap apply.
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   For some cases the overhead of kmap can be too high, so a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. A fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (a userspace sketch follows at
 *   the end of this section):
 *
 *   - mmap dma-buf fd
 *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *     want (with the new data being consumed by say the GPU or the scanout
 *     device)
 *   - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel CPU access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */

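/*
 * The userspace sketch referenced in the DOC section above: a hypothetical
 * read/write cycle over an mmap'ed dma-buf ("fd" and "size" are assumed; the
 * ioctl should be restarted on EAGAIN/EINTR, omitted here for brevity):
 *
 *     #include <sys/mman.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     struct dma_buf_sync sync = { 0 };
 *     char *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *     sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *     ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);   // bracket the CPU access
 *
 *     ptr[0] = 0x42;                          // read/write through the mapping
 *
 *     sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *     ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     munmap(ptr, size);
 */
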
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        bool write = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_TO_DEVICE);
        struct dma_resv *resv = dmabuf->resv;
        long ret;

        /* Wait on any implicit rendering fences */
        ret = dma_resv_wait_timeout_rcu(resv, write, true,
                                        MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;

        return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

        /* Ensure that all fences are waited upon - but we first allow
         * the native handler the chance to do so more efficiently if it
         * chooses. A double invocation here will be a reasonably cheap no-op.
         */
        if (ret == 0)
                ret = __dma_buf_begin_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                           enum dma_data_direction direction)
{
        int ret = 0;

        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

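/*
 * A hypothetical kernel-side sketch of bracketed CPU access using the two
 * helpers above together with dma_buf_kmap()/dma_buf_kunmap() ("dmabuf" is an
 * assumed, already-attached buffer):
 *
 *     void *vaddr;
 *     int ret;
 *
 *     ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *     if (ret)
 *             return ret;
 *
 *     vaddr = dma_buf_kmap(dmabuf, 0);        // map the first PAGE_SIZE chunk
 *     if (vaddr) {
 *             // read the freshly DMA'd data through vaddr
 *             dma_buf_kunmap(dmabuf, 0, vaddr);
 *     }
 *
 *     dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
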
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        if (!dmabuf->ops->map)
                return NULL;
        return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->unmap)
                dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        struct file *oldfile;
        int ret;

        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check if buffer supports mmap */
        if (!dmabuf->ops->mmap)
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        get_file(dmabuf->file);
        oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;

        ret = dmabuf->ops->mmap(dmabuf, vma);
        if (ret) {
                /* restore old parameters on failure */
                vma->vm_file = oldfile;
                fput(dmabuf->file);
        } else {
                if (oldfile)
                        fput(oldfile);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

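/*
 * A hypothetical importer sketch: a driver's own mmap handler simply
 * redirecting to the dma-buf ("struct my_object" and its "dmabuf" member are
 * assumed driver state, not defined in this file):
 *
 *     static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *     {
 *             struct my_object *obj = file->private_data;
 *
 *             return dma_buf_mmap(obj->dmabuf, vma, 0);
 *     }
 */
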
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is mapping objects linearly in kernel space for frequently used objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        void *ptr;

        if (WARN_ON(!dmabuf))
                return NULL;

        if (!dmabuf->ops->vmap)
                return NULL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(!dmabuf->vmap_ptr);
                ptr = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(dmabuf->vmap_ptr);

        ptr = dmabuf->ops->vmap(dmabuf);
        if (WARN_ON_ONCE(IS_ERR(ptr)))
                ptr = NULL;
        if (!ptr)
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(!dmabuf->vmap_ptr);
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(dmabuf->vmap_ptr != vaddr);

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, vaddr);
                dmabuf->vmap_ptr = NULL;
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

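/*
 * A hypothetical sketch of the vmap-with-kmap-fallback pattern recommended in
 * the DOC section above ("dmabuf" is an assumed buffer; per-page access via
 * dma_buf_kmap() stands in when no contiguous mapping can be made):
 *
 *     void *vaddr = dma_buf_vmap(dmabuf);
 *
 *     if (vaddr) {
 *             // linear access through vaddr
 *             dma_buf_vunmap(dmabuf, vaddr);
 *     } else {
 *             void *page = dma_buf_kmap(dmabuf, 0);   // fall back per page
 *             if (page) {
 *                     // access one PAGE_SIZE chunk at a time
 *                     dma_buf_kunmap(dmabuf, 0, page);
 *             }
 *     }
 */
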
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        struct dma_resv *robj;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned seq;
        int count = 0, attach_count, shared_count, i;
        size_t size = 0;

        ret = mutex_lock_interruptible(&db_list.lock);
        if (ret)
                return ret;

        seq_puts(s, "\nDma-buf Objects:\n");
        seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
                   "size", "flags", "mode", "count", "ino");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = mutex_lock_interruptible(&buf_obj->lock);
                if (ret) {
                        seq_puts(s,
                                 "\tERROR locking buffer object: skipping\n");
                        continue;
                }

                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
                           buf_obj->size,
                           buf_obj->file->f_flags, buf_obj->file->f_mode,
                           file_count(buf_obj->file),
                           buf_obj->exp_name,
                           file_inode(buf_obj->file)->i_ino,
                           buf_obj->name ?: "");

                robj = buf_obj->resv;
                while (true) {
                        seq = read_seqcount_begin(&robj->seq);
                        rcu_read_lock();
                        fobj = rcu_dereference(robj->fence);
                        shared_count = fobj ? fobj->shared_count : 0;
                        fence = rcu_dereference(robj->fence_excl);
                        if (!read_seqcount_retry(&robj->seq, seq))
                                break;
                        rcu_read_unlock();
                }

                if (fence)
                        seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
                                   fence->ops->get_driver_name(fence),
                                   fence->ops->get_timeline_name(fence),
                                   dma_fence_is_signaled(fence) ? "" : "un");
                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        if (!dma_fence_get_rcu(fence))
                                continue;
                        seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
                                   fence->ops->get_driver_name(fence),
                                   fence->ops->get_timeline_name(fence),
                                   dma_fence_is_signaled(fence) ? "" : "un");
                        dma_fence_put(fence);
                }
                rcu_read_unlock();

                seq_puts(s, "\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
                        attach_count++;
                }

                seq_printf(s, "Total %d devices attached\n\n",
                           attach_count);

                count++;
                size += buf_obj->size;
                mutex_unlock(&buf_obj->lock);
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
        struct dentry *d;
        int err = 0;

        d = debugfs_create_dir("dma_buf", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);

        dma_buf_debugfs_dir = d;

        d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
                                NULL, &dma_buf_debug_fops);
        if (IS_ERR(d)) {
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
                debugfs_remove_recursive(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                err = PTR_ERR(d);
        }

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
        return 0;
}

static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        dma_buf_mnt = kern_mount(&dma_buf_fs_type);
        if (IS_ERR(dma_buf_mnt))
                return PTR_ERR(dma_buf_mnt);

        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
        kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);