// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

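/*
 * Bookkeeping for a single dma-buf handled by gntdev: the buffer is either
 * exported from granted pages or imported from a foreign exporter, and the
 * u.exp/u.imp union below reflects which of the two roles it plays.
 */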
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;
			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer (only for dma-buf export). */
	struct page **pages;
};

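/*
 * One entry on the exp_wait_list: pairs an exported buffer with the
 * completion that gntdev_ioctl_dmabuf_exp_wait_released() blocks on until
 * dmabuf_exp_release() fires it.
 */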
struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

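/*
 * Per-attachment state for an exported buffer: the sg table mapped for the
 * attached device and its direction are cached here, so the actual unmap
 * can be deferred until dmabuf_exp_ops_detach().
 */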
struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

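/* Called with priv->lock held, so exp_wait_list cannot change under us. */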
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found, it means that either
	 * the buffer has already been released or the file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

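/*
 * The dma_buf_ops below implement the exporter side: attach only allocates
 * per-attachment bookkeeping, while the sg table is built and DMA-mapped
 * lazily in dmabuf_exp_ops_map_dma_buf() and torn down at detach time.
 */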
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

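/*
 * kref release callback for an exported buffer. Both kref_put() call sites
 * hold priv->lock, so it is safe to touch exp_list and exp_wait_list here.
 */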
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

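/*
 * Create a dma-buf from the pages in args, export it and publish the new
 * file descriptor in args->fd; the gntdev file is pinned with get_file()
 * so the device context outlives external dma-buf users.
 */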
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

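/*
 * Map the given grant references from domid into freshly allocated backing
 * storage and export the resulting pages as a dma-buf, returning its file
 * descriptor in *fd.
 */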
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						gfns[i], 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);

	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

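/*
 * Import a dma-buf by fd: attach and map it for the given device, convert
 * the DMA addresses of the mapping into gfns and grant domid foreign
 * access to them, storing the new grant references in u.imp.refs.
 */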
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_dma_page_iter sg_iter;
	unsigned long *gfns;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
	if (!gfns) {
		ret = ERR_PTR(-ENOMEM);
		goto fail_unmap;
	}

	/*
	 * Now convert sgt to an array of gfns without accessing underlying
	 * pages. It is not allowed to access the underlying struct page of
	 * an sg table exported by DMA-buf, but since we deal with a special
	 * Xen dma device here (not a normal physical one), look at the dma
	 * addresses in the sg table and then calculate gfns directly from
	 * them.
	 */
	i = 0;
	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

		gfns[i++] = pfn_to_gfn(pfn);
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	kfree(gfns);
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

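/*
 * Undo dmabuf_imp_to_refs(): end foreign access, unmap and detach the
 * buffer, then drop our dma-buf reference and free the bookkeeping.
 */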
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

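/*
 * Illustrative user-space sketch only (not part of the driver): exporting
 * two granted pages as a dma-buf via /dev/xen/gntdev, assuming the
 * variable-length argument layout of struct ioctl_gntdev_dmabuf_exp_from_refs
 * from include/uapi/xen/gntdev.h and omitting error handling:
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = malloc(sizeof(*op) + sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = 2;
 *	op->domid = domid;
 *	op->refs[0] = ref0;
 *	op->refs[1] = ref1;
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *	// on success, op->fd holds the new dma-buf file descriptor
 */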
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}