// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
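
/*
 * udmabuf: create dma-buf objects from memfd (shmem) backed memory, so
 * buffers allocated by userspace can be handed to dma-buf importers.
 */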

static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
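
/* One udmabuf instance: the pinned shmem pages backing the exported dma-buf. */
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
};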

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	/* Faults beyond the buffer get SIGBUS instead of a stale page. */
	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};
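
/* CPU mmap of the dma-buf: shared mappings only, faulted in page by page. */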
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}
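
/*
 * Build a scatter/gather table covering all backing pages and DMA-map it
 * for the attaching device.
 */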
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	struct udmabuf *ubuf = at->dmabuf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
		goto err;
	}
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	pgoff_t pg;

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}
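
/* Per-page CPU access: the backing pages are ordinary shmem pages. */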
static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
{
	struct udmabuf *ubuf = buf->priv;
	struct page *page = ubuf->pages[page_num];

	return kmap(page);
}

static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
			   void *vaddr)
{
	struct udmabuf *ubuf = buf->priv;

	/* kunmap() takes the page, not the kernel virtual address. */
	kunmap(ubuf->pages[page_num]);
}

static const struct dma_buf_ops udmabuf_ops = {
	.map_dma_buf   = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release       = release_udmabuf,
	.map           = kmap_udmabuf,
	.unmap         = kunmap_udmabuf,
	.mmap          = mmap_udmabuf,
};
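
/*
 * A memfd must carry the SHRINK seal (pinned pages must not be truncated
 * away underneath the dma-buf) and must not carry the WRITE seal (the
 * exported buffer stays writable, which would bypass such a seal).
 */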
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
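
/*
 * Core create path: validate every (memfd, offset, size) item, pin the
 * backing shmem pages, and export them as a single dma-buf fd.
 */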
static long udmabuf_create(const struct udmabuf_create_list *head,
			   const struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* First pass: check alignment and enforce the total size limit. */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* Second pass: check seals and grab a reference on each page. */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}
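
/* UDMABUF_CREATE: convenience wrapper for a single-memfd buffer. */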
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(&head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(&head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "udmabuf",
	.fops  = &udmabuf_fops,
};
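
/*
 * Register /dev/udmabuf and give the device a wide DMA mask; a NULL mask
 * would make DMA mapping against it fail.
 */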
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
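
/*
 * Illustrative userspace sketch (not part of this driver; error handling
 * omitted). A memfd is created with sealing allowed, sealed against
 * shrinking as SEALS_WANTED requires, and turned into a dma-buf fd:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/udmabuf.h>
 *
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, 4 * getpagesize());
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = 4 * getpagesize(),
 *	};
 *	int devfd  = open("/dev/udmabuf", O_RDWR);
 *	int dmabuf = ioctl(devfd, UDMABUF_CREATE, &create);
 */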