// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on using grant reference 0 as the invalid grant reference:
 * grant reference 0 is valid, but it is never exposed to a driver
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF       0
#endif

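/*
 * Per-buffer bookkeeping: a buffer is either exported from grant-mapped
 * pages or imported from a foreign dma-buf, hence the union below.
 */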
struct gntdev_dmabuf {
        struct gntdev_dmabuf_priv *priv;
        struct dma_buf *dmabuf;
        struct list_head next;
        int fd;

        union {
                struct {
                        /* Exported buffers are reference counted. */
                        struct kref refcount;

                        struct gntdev_priv *priv;
                        struct gntdev_grant_map *map;
                } exp;
                struct {
                        /* Granted references of the imported buffer. */
                        grant_ref_t *refs;
                        /* Scatter-gather table of the imported buffer. */
                        struct sg_table *sgt;
                        /* dma-buf attachment of the imported buffer. */
                        struct dma_buf_attachment *attach;
                } imp;
        } u;

        /* Number of pages this buffer has. */
        int nr_pages;
        /* Pages of this buffer (only for dma-buf export). */
        struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
        struct list_head next;
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct completion completion;
};

struct gntdev_dmabuf_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
        /* List of exported DMA buffers. */
        struct list_head exp_list;
        /* List of wait objects. */
        struct list_head exp_wait_list;
        /* List of imported DMA buffers. */
        struct list_head imp_list;
        /* Protects the exp_list, exp_wait_list and imp_list above. */
        struct mutex lock;
        /*
         * We reference this file while exporting dma-bufs, so
         * the grant device context is not destroyed while there are
         * external users alive.
         */
        struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

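/*
 * Allocate a wait object for the given buffer and drop our reference to it:
 * from now on the buffer can only go away via dmabuf_exp_release(), which
 * signals the wait object.
 */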
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
                        struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_completion(&obj->completion);
        obj->gntdev_dmabuf = gntdev_dmabuf;

        mutex_lock(&priv->lock);
        list_add(&obj->next, &priv->exp_wait_list);
        /* Put our reference and wait for gntdev_dmabuf's release to fire. */
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
        return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
                                     struct gntdev_dmabuf_wait_obj *obj)
{
        mutex_lock(&priv->lock);
        list_del(&obj->next);
        mutex_unlock(&priv->lock);
        kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
                                    u32 wait_to_ms)
{
        if (wait_for_completion_timeout(&obj->completion,
                        msecs_to_jiffies(wait_to_ms)) <= 0)
                return -ETIMEDOUT;

        return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
                                       struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        list_for_each_entry(obj, &priv->exp_wait_list, next)
                if (obj->gntdev_dmabuf == gntdev_dmabuf) {
                        pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
                        complete_all(&obj->completion);
                        break;
                }
}

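/* Look up an exported buffer by fd and take a reference to it. */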
static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the export list\n");
                        kref_get(&gntdev_dmabuf->u.exp.refcount);
                        ret = gntdev_dmabuf;
                        break;
                }
        mutex_unlock(&priv->lock);
        return ret;
}

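/*
 * Wait (with a timeout) until the exported buffer with the given fd has
 * been released by its last external user.
 */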
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
                                    int wait_to_ms)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct gntdev_dmabuf_wait_obj *obj;
        int ret;

        pr_debug("Will wait for dma-buf with fd %d\n", fd);
        /*
         * Try to find the DMA buffer: failing to find it means that it
         * has either already been released or the provided file
         * descriptor is wrong.
         */
        gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        /*
         * gntdev_dmabuf still exists and we now hold a reference to it,
         * so prepare to wait: allocate a wait object and add it to the
         * wait list so it can be found on release.
         */
        obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
        dmabuf_exp_wait_obj_free(priv, obj);
        return ret;
}

/* Implementation of the dma-buf export itself. */

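/* Wrap an array of pages into a newly allocated scatter-gather table. */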
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}

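/* Allocate per-attachment state; the actual mapping is created lazily. */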
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

        gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf_attach)
                return -ENOMEM;

        gntdev_dmabuf_attach->dir = DMA_NONE;
        attach->priv = gntdev_dmabuf_attach;
        return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

        if (gntdev_dmabuf_attach) {
                struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                                dma_unmap_sgtable(attach->dev, sgt,
                                                  gntdev_dmabuf_attach->dir,
                                                  DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(gntdev_dmabuf_attach);
                attach->priv = NULL;
        }
}

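/*
 * Map the buffer into the attached device's DMA space. One mapping per
 * attachment is created and cached; requesting a second mapping with a
 * different direction fails with -EBUSY.
 */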
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
        struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
        struct sg_table *sgt;

        pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
                 attach->dev);

        if (dir == DMA_NONE || !gntdev_dmabuf_attach)
                return ERR_PTR(-EINVAL);

        /* Return the cached mapping when possible. */
        if (gntdev_dmabuf_attach->dir == dir)
                return gntdev_dmabuf_attach->sgt;

        /*
         * Two mappings with different directions for the same attachment are
         * not allowed.
         */
        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                return ERR_PTR(-EBUSY);

        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
                if (dma_map_sgtable(attach->dev, sgt, dir,
                                    DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        gntdev_dmabuf_attach->sgt = sgt;
                        gntdev_dmabuf_attach->dir = dir;
                }
        }
        if (IS_ERR(sgt))
                pr_debug("Failed to map sg table for dev %p\n", attach->dev);
        return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        /* Intentionally empty: the unmap is done in dmabuf_exp_ops_detach(). */
}

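/*
 * kref release callback: wakes up any waiters and frees the buffer.
 * Both kref_put() call sites hold priv->lock, so the exp_wait_list walk
 * in dmabuf_exp_wait_obj_signal() is safe.
 */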
static void dmabuf_exp_release(struct kref *kref)
{
        struct gntdev_dmabuf *gntdev_dmabuf =
                container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

        dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
        list_del(&gntdev_dmabuf->next);
        fput(gntdev_dmabuf->priv->filp);
        kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
                                  struct gntdev_grant_map *map)
{
        mutex_lock(&priv->lock);
        list_del(&map->next);
        gntdev_put_map(NULL /* already removed */, map);
        mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
        struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
        struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

        dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
                              gntdev_dmabuf->u.exp.map);
        mutex_lock(&priv->lock);
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
}

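/* dma_buf_ops for buffers exported by gntdev. */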
static const struct dma_buf_ops dmabuf_exp_ops = {
        .attach = dmabuf_exp_ops_attach,
        .detach = dmabuf_exp_ops_detach,
        .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
        .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
        .release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
        struct gntdev_priv *priv;
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_priv *dmabuf_priv;
        struct device *dev;
        int count;
        struct page **pages;
        u32 fd;
};

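/*
 * Create a dma-buf from already grant-mapped pages, install it as a file
 * descriptor and link it into the list of exported buffers.
 */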
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct gntdev_dmabuf *gntdev_dmabuf;
        int ret;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                return -ENOMEM;

        kref_init(&gntdev_dmabuf->u.exp.refcount);

        gntdev_dmabuf->priv = args->dmabuf_priv;
        gntdev_dmabuf->nr_pages = args->count;
        gntdev_dmabuf->pages = args->pages;
        gntdev_dmabuf->u.exp.priv = args->priv;
        gntdev_dmabuf->u.exp.map = args->map;

        exp_info.exp_name = KBUILD_MODNAME;
        if (args->dev->driver && args->dev->driver->owner)
                exp_info.owner = args->dev->driver->owner;
        else
                exp_info.owner = THIS_MODULE;
        exp_info.ops = &dmabuf_exp_ops;
        exp_info.size = args->count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = gntdev_dmabuf;

        gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(gntdev_dmabuf->dmabuf)) {
                ret = PTR_ERR(gntdev_dmabuf->dmabuf);
                gntdev_dmabuf->dmabuf = NULL;
                goto fail;
        }

        ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
        if (ret < 0)
                goto fail;

        gntdev_dmabuf->fd = ret;
        args->fd = ret;

        pr_debug("Exporting DMA buffer with fd %d\n", ret);

        mutex_lock(&args->dmabuf_priv->lock);
        list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
        mutex_unlock(&args->dmabuf_priv->lock);
        get_file(gntdev_dmabuf->priv->filp);
        return 0;

fail:
        if (gntdev_dmabuf->dmabuf)
                dma_buf_put(gntdev_dmabuf->dmabuf);
        kfree(gntdev_dmabuf);
        return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
                                 int count)
{
        struct gntdev_grant_map *map;

        if (unlikely(gntdev_test_page_count(count)))
                return ERR_PTR(-EINVAL);

        if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
            (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
                pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
                return ERR_PTR(-EINVAL);
        }

        map = gntdev_alloc_map(priv, count, dmabuf_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        return map;
}

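/*
 * Export path: allocate backing storage, map the foreign grant references
 * into it and hand the result to user space as a dma-buf fd.
 */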
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
                                int count, u32 domid, u32 *refs, u32 *fd)
{
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_export_args args;
        int i, ret;

        map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
        if (IS_ERR(map))
                return PTR_ERR(map);

        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        mutex_unlock(&priv->lock);

        map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
        map->flags |= GNTMAP_device_map;
#endif

        ret = gntdev_map_grant_pages(map);
        if (ret < 0)
                goto out;

        args.priv = priv;
        args.map = map;
        args.dev = priv->dma_dev;
        args.dmabuf_priv = priv->dmabuf_priv;
        args.count = map->count;
        args.pages = map->pages;
        args.fd = -1; /* Silence a spurious gcc warning on i386. */

        ret = dmabuf_exp_from_pages(&args);
        if (ret < 0)
                goto out;

        *fd = args.fd;
        return 0;

out:
        dmabuf_exp_remove_map(priv, map);
        return ret;
}

/* DMA buffer import support. */

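/*
 * Grant the foreign domain access to each of the given frames. On failure
 * any references already granted are left for the caller to end.
 */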
static int
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
                                int count, int domid)
{
        grant_ref_t priv_gref_head;
        int i, ret;

        ret = gnttab_alloc_grant_references(count, &priv_gref_head);
        if (ret < 0) {
                pr_debug("Cannot allocate grant references, ret %d\n", ret);
                return ret;
        }

        for (i = 0; i < count; i++) {
                int cur_ref;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0) {
                        ret = cur_ref;
                        pr_debug("Cannot claim grant reference, ret %d\n", ret);
                        goto out;
                }

                gnttab_grant_foreign_access_ref(cur_ref, domid,
                                                gfns[i], 0);
                refs[i] = cur_ref;
        }

        return 0;

out:
        gnttab_free_grant_references(priv_gref_head);
        return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (refs[i] != GRANT_INVALID_REF)
                        gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        int i;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                goto fail_no_free;

        gntdev_dmabuf->u.imp.refs = kcalloc(count,
                                            sizeof(gntdev_dmabuf->u.imp.refs[0]),
                                            GFP_KERNEL);
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;

        gntdev_dmabuf->nr_pages = count;

        for (i = 0; i < count; i++)
                gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

        return gntdev_dmabuf;

fail:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
        return ERR_PTR(-ENOMEM);
}

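/*
 * Import path: attach to a foreign dma-buf, map it, convert the DMA
 * addresses to frame numbers and grant the foreign domain access to them.
 */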
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                   int fd, int count, int domid)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_dma_page_iter sg_iter;
        unsigned long *gfns;
        int i;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return ERR_CAST(dma_buf);

        gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
        if (IS_ERR(gntdev_dmabuf)) {
                ret = gntdev_dmabuf;
                goto fail_put;
        }

        gntdev_dmabuf->priv = priv;
        gntdev_dmabuf->fd = fd;

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = ERR_CAST(attach);
                goto fail_free_obj;
        }

        gntdev_dmabuf->u.imp.attach = attach;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = ERR_CAST(sgt);
                goto fail_detach;
        }

        /* Check that we have zero offset. */
        if (sgt->sgl->offset) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
                         sgt->sgl->offset);
                goto fail_unmap;
        }

        /* Check the number of pages that the imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer is %zu bytes, user-space expects %d pages\n",
                         attach->dmabuf->size, gntdev_dmabuf->nr_pages);
                goto fail_unmap;
        }

        gntdev_dmabuf->u.imp.sgt = sgt;

        gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
        if (!gfns) {
                ret = ERR_PTR(-ENOMEM);
                goto fail_unmap;
        }

        /*
         * Now convert the sgt to an array of gfns without accessing the
         * underlying pages. Accessing the underlying struct page of an sg
         * table exported by dma-buf is not allowed, but since we deal with
         * a special Xen DMA device here (not a normal physical one), we can
         * look at the DMA addresses in the sg table and calculate the gfns
         * directly from them.
         */
        i = 0;
        for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
                dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
                unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

                gfns[i++] = pfn_to_gfn(pfn);
        }

        ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
        kfree(gfns);
        if (IS_ERR(ret))
                goto fail_end_access;

        pr_debug("Imported DMA buffer with fd %d\n", fd);

        mutex_lock(&priv->lock);
        list_add(&gntdev_dmabuf->next, &priv->imp_list);
        mutex_unlock(&priv->lock);

        return gntdev_dmabuf;

fail_end_access:
        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
fail_free_obj:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
        dma_buf_put(dma_buf);
        return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove it from the
 * list of imported buffers.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the import list\n");
                        ret = gntdev_dmabuf;
                        list_del(&gntdev_dmabuf->next);
                        break;
                }
        }
        mutex_unlock(&priv->lock);
        return ret;
}

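/*
 * Tear down a single imported buffer: end foreign access, unmap and detach
 * the dma-buf and free the bookkeeping.
 */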
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        pr_debug("Releasing DMA buffer with fd %d\n", fd);

        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
                                      gntdev_dmabuf->nr_pages);

        attach = gntdev_dmabuf->u.imp.attach;

        if (gntdev_dmabuf->u.imp.sgt)
                dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
                                         DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        dma_buf_put(dma_buf);

        dmabuf_imp_free_storage(gntdev_dmabuf);
        return 0;
}

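/* Release all imported buffers still on the list at teardown time. */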
static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf;

        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
                dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

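/*
 * Exporting is only supported when gntdev maps grants directly, i.e. with
 * use_ptemod == 0.
 */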
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
                                       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs op;
        u32 *refs;
        long ret;

        if (use_ptemod) {
                pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
                         use_ptemod);
                return -EINVAL;
        }

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
        if (!refs)
                return -ENOMEM;

        if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out;
        }

        ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
                                   op.domid, refs, &op.fd);
        if (ret)
                goto out;

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                ret = -EFAULT;

out:
        kfree(refs);
        return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
                                           struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_wait_released op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
                                        op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_to_refs op;
        struct gntdev_dmabuf *gntdev_dmabuf;
        long ret;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
                                           priv->dma_dev, op.fd,
                                           op.count, op.domid);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
                         sizeof(*u->refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out_release;
        }
        return 0;

out_release:
        dmabuf_imp_release(priv->dmabuf_priv, op.fd);
        return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_release op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

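/*
 * Allocate the per-file dma-buf context. A reference to filp is taken
 * while buffers are exported so that the grant device context outlives
 * its external users.
 */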
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
        struct gntdev_dmabuf_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        mutex_init(&priv->lock);
        INIT_LIST_HEAD(&priv->exp_list);
        INIT_LIST_HEAD(&priv->exp_wait_list);
        INIT_LIST_HEAD(&priv->imp_list);

        priv->filp = filp;

        return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
        dmabuf_imp_release_all(priv);
        kfree(priv);
}