/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
        unsigned long                   attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        refcount_t                      refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;

        struct vb2_buffer               *vb;
        bool                            non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

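/*
 * Return the length of the initial DMA-contiguous run of a mapped sg table,
 * starting at its first entry. Callers use this to check whether a buffer
 * can be addressed by the device as one contiguous chunk.
 */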
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sgtable_dma_sg(sgt, s, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected += sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

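/*
 * The cookie handed to drivers is a pointer to the buffer's DMA address;
 * vb2_dma_contig_plane_dma_addr() dereferences it.
 */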
static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (buf->vaddr)
                return buf->vaddr;

        if (buf->db_attach) {
                struct iosys_map map;

                if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
                        buf->vaddr = map.vaddr;

                return buf->vaddr;
        }

        if (buf->non_coherent_mem)
                buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
                                                    buf->dma_sgt);
        return buf->vaddr;
}

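/*
 * Current reference count: the vb2 core's initial reference plus one per
 * live userspace mapping and per exported DMABUF.
 */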
static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* This takes care of DMABUF and user-enforced cache sync hint */
        if (buf->vb->skip_cache_sync_on_prepare)
                return;

        if (!buf->non_coherent_mem)
                return;

        /* Non-coherent MMAP only */
        if (buf->vaddr)
                flush_kernel_vmap_range(buf->vaddr, buf->size);

        /* For both USERPTR and non-coherent MMAP */
        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* This takes care of DMABUF and user-enforced cache sync hint */
        if (buf->vb->skip_cache_sync_on_finish)
                return;

        if (!buf->non_coherent_mem)
                return;

        /* Non-coherent MMAP only */
        if (buf->vaddr)
                invalidate_kernel_vmap_range(buf->vaddr, buf->size);

        /* For both USERPTR and non-coherent MMAP */
        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

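/*
 * Drop one reference to an MMAP buffer. When the last reference goes away,
 * free the DMA memory, the exporter sg table (if one was created) and the
 * device reference taken at allocation time.
 */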
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!refcount_dec_and_test(&buf->refcount))
                return;

        if (buf->non_coherent_mem) {
                if (buf->vaddr)
                        dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
                dma_free_noncontiguous(buf->dev, buf->size,
                                       buf->dma_sgt, buf->dma_dir);
        } else {
                if (buf->sgt_base) {
                        sg_free_table(buf->sgt_base);
                        kfree(buf->sgt_base);
                }
                dma_free_attrs(buf->dev, buf->size, buf->cookie,
                               buf->dma_addr, buf->attrs);
        }
        put_device(buf->dev);
        kfree(buf);
}

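/*
 * Allocate DMA-coherent backing memory. The kernel mapping is available
 * right away through buf->vaddr unless the queue requested
 * DMA_ATTR_NO_KERNEL_MAPPING.
 */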
static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
        struct vb2_queue *q = buf->vb->vb2_queue;

        buf->cookie = dma_alloc_attrs(buf->dev,
                                      buf->size,
                                      &buf->dma_addr,
                                      GFP_KERNEL | q->gfp_flags,
                                      buf->attrs);
        if (!buf->cookie)
                return -ENOMEM;

        if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                return 0;

        buf->vaddr = buf->cookie;
        return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
        struct vb2_queue *q = buf->vb->vb2_queue;

        buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
                                               buf->size,
                                               buf->dma_dir,
                                               GFP_KERNEL | q->gfp_flags,
                                               buf->attrs);
        if (!buf->dma_sgt)
                return -ENOMEM;

        buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

        /*
         * For non-coherent buffers the kernel mapping is created on demand
         * in vb2_dc_vaddr().
         */
        return 0;
}

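/*
 * MMAP allocation entry point: pick coherent or non-coherent backing based
 * on the queue configuration and pin the device for the buffer's lifetime.
 */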
static void *vb2_dc_alloc(struct vb2_buffer *vb,
                          struct device *dev,
                          unsigned long size)
{
        struct vb2_dc_buf *buf;
        int ret;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->attrs = vb->vb2_queue->dma_attrs;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->vb = vb;
        buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

        buf->size = size;
        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);

        if (buf->non_coherent_mem)
                ret = vb2_dc_alloc_non_coherent(buf);
        else
                ret = vb2_dc_alloc_coherent(buf);

        if (ret) {
                dev_err(dev, "dma alloc of size %lu failed\n", size);
                /* Drop the device reference taken above before bailing out */
                put_device(buf->dev);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);

        return buf;
}

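/*
 * Map an MMAP buffer into userspace. Also backs the exported DMABUF's mmap
 * operation (see vb2_dc_dmabuf_ops_mmap()).
 */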
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        if (buf->non_coherent_mem)
                ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
                                             buf->dma_sgt);
        else
                ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
                                     buf->size, buf->attrs);
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

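/*
 * Each attachment gets its own copy of the exporter's sg table together with
 * the direction it is currently mapped in (DMA_NONE when unmapped).
 */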
struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                /*
                 * Cache sync can be skipped here, as the vb2_dc memory is
                 * allocated from device coherent memory, which means the
                 * memory locations do not require any explicit cache
                 * maintenance prior or after being used by the device.
                 */
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }

        /*
         * mapping to the client with new direction, no cache sync
         * required see comment in vb2_dc_dmabuf_ops_detach()
         */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                   enum dma_data_direction direction)
{
        return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                 enum dma_data_direction direction)
{
        return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
        struct vb2_dc_buf *buf;
        void *vaddr;

        buf = dbuf->priv;
        vaddr = vb2_dc_vaddr(buf->vb, buf);
        if (!vaddr)
                return -EINVAL;

        iosys_map_set_vaddr(map, vaddr);

        return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
        .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

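/*
 * Return an sg table describing the buffer's pages: the existing one for
 * non-coherent buffers, or one built with dma_get_sgtable_attrs() for
 * coherent buffers. It serves as the template copied into each attachment.
 */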
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        if (buf->non_coherent_mem)
                return buf->dma_sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
                                         void *buf_priv,
                                         unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

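/*
 * Release a USERPTR buffer: unmap it, mark the pages dirty if the device may
 * have written to them, and unpin the user pages (or unmap the PFN range if
 * the buffer had no struct pages).
 */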
static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < frame_vector_count(buf->vec); i++)
                                set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
                                   buf->dma_dir, 0);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

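/*
 * Pin the user pages backing [vaddr, vaddr + size) and DMA-map them. If the
 * range has no struct pages but turns out to be physically contiguous (e.g.
 * a PFN-mapped region), fall back to dma_map_resource().
 */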
static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
                                unsigned long vaddr, unsigned long size)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned int offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->vb = vb;

        offset = lower_32_bits(offset_in_page(vaddr));
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = dma_map_resource(buf->dev,
                                __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
                if (dma_mapping_error(buf->dev, buf->dma_addr)) {
                        ret = -ENOMEM;
                        goto fail_pfnvec;
                }
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->non_coherent_mem = 1;

out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

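/*
 * Pin the attached dmabuf for DMA and check that the exporter's mapping is
 * contiguous in DMA address space and covers the whole buffer.
 */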
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
                                  struct dma_buf *dbuf, unsigned long size)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->vb = vb;

        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:        device for configuring DMA parameters
 * @size:       size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called by drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
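 *
 * A typical call sequence from a driver's probe path would be (a minimal
 * sketch; &pdev->dev and the queue variable are placeholder names):
 *
 *	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	...
 *	ret = vb2_queue_init(&queue);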
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);