// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer (only for dma-buf export). */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* Lock protecting the exp_list, exp_wait_list and imp_list above. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

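/*
 * Allocate a wait object for the exported buffer and drop our temporary
 * reference, so that dmabuf_exp_release() can fire once all external users
 * are gone; the waiter is then woken via the object's completion.
 */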
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) == 0)
		return -ETIMEDOUT;

	return 0;
}

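/*
 * Wake any waiter attached to this buffer. Called from dmabuf_exp_release()
 * with priv->lock already held, which is what makes the unlocked walk of
 * exp_wait_list safe.
 */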
static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the provided file descriptor
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and we now hold a reference to it, so
	 * prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

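/*
 * Wrap an array of pages into a freshly allocated sg_table; the caller
 * owns the table and must sg_free_table() and kfree() it when done.
 */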
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

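/*
 * Set up the per-attachment state: the mapping direction starts out as
 * DMA_NONE and the sg_table is created lazily in the map_dma_buf hook.
 */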
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

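/*
 * Map the exported pages for the attached device. The resulting sg_table
 * is cached per attachment: repeated calls with the same direction return
 * the cached table, while a second, different direction yields -EBUSY.
 */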
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmapping is done in dmabuf_exp_ops_detach(). */
}

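/*
 * Final kref release of an exported buffer: wake any waiter, unlink the
 * buffer and drop the file reference taken at export time. Runs with
 * priv->lock held, since both kref_put() callers hold it.
 */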
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

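/*
 * Create a dma-buf on top of already grant-mapped pages and install a file
 * descriptor for it. On success the buffer goes onto exp_list and holds a
 * reference to the gntdev file so the device context stays alive.
 */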
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

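/*
 * Map @count foreign grant references from @domid and export the backing
 * pages as a new dma-buf, returning its file descriptor in @fd.
 */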
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

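/*
 * Grant @domid access to each of the @count frames in @gfns, filling
 * @refs with the resulting grant references. On failure the unclaimed
 * references are freed here; any already granted entries in @refs are
 * undone by the caller via dmabuf_imp_end_foreign_access().
 */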
static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						gfns[i], 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

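/*
 * Allocate the tracking structure for an imported buffer. All grant
 * references start out as INVALID_GRANT_REF so a partially granted
 * buffer can be torn down safely.
 */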
static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

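/*
 * Import the dma-buf behind @fd: attach and map it for the Xen DMA device,
 * translate the DMA addresses of its pages into gfns and grant @domid
 * access to them. The granted references are kept in the returned
 * structure until the buffer is released again.
 */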
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_dma_page_iter sg_iter;
	unsigned long *gfns;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check the number of pages the imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size >> PAGE_SHIFT,
			 gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
	if (!gfns) {
		ret = ERR_PTR(-ENOMEM);
		goto fail_unmap;
	}

	/*
	 * Now convert the sgt to an array of gfns without accessing the
	 * underlying pages. It is not allowed to access the underlying
	 * struct page of an sg table exported by DMA-buf, but since we deal
	 * with a special Xen DMA device here (not a normal physical one),
	 * look at the DMA addresses in the sg table and calculate the gfns
	 * directly from them.
	 */
	i = 0;
	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

		gfns[i++] = pfn_to_gfn(pfn);
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	kfree(gfns);
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove it from
 * the list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

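/*
 * A minimal user-space sketch of the export path, assuming "fd" is an open
 * /dev/xen/gntdev and "refs" holds "count" grant references from "domid"
 * (illustration only, not part of this driver; use_dmabuf() is a
 * hypothetical consumer of the returned dma-buf fd):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op =
 *		malloc(sizeof(*op) + count * sizeof(op->refs[0]));
 *
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		use_dmabuf(op->fd);
 */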
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}