/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

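/*
 * Per-buffer private state shared by the MMAP, USERPTR and DMABUF paths:
 * the device used for DMA, the kernel mapping (if any), the DMA address
 * and the scatterlist describing the buffer, plus the refcounting and
 * attachment bookkeeping used when the buffer is mmapped or exported or
 * imported as a DMA-BUF.
 */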
struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
        unsigned long                   attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        refcount_t                      refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sgtable_dma_sg(sgt, s, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected += sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (!sgt)
                return;

        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (!sgt)
                return;

        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!refcount_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
{
        struct vb2_dc_buf *buf;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                        GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                buf->dma_addr, buf->size, buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                /*
                 * Cache sync can be skipped here, as the vb2_dc memory is
                 * allocated from device coherent memory, which means the
                 * memory locations do not require any explicit cache
                 * maintenance prior to or after being used by the device.
                 */
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }

        /*
         * mapping to the client with new direction, no cache sync
         * required, see comment in vb2_dc_dmabuf_ops_detach()
         */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                   enum dma_data_direction direction)
{
        return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                 enum dma_data_direction direction)
{
        return 0;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
        .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < frame_vector_count(buf->vec); i++)
                                set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
                                   buf->dma_dir, 0);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned int offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = dma_dir;

        offset = lower_32_bits(offset_in_page(vaddr));
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = dma_map_resource(buf->dev,
                                __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
                if (dma_mapping_error(buf->dev, buf->dma_addr)) {
                        ret = -ENOMEM;
                        goto fail_pfnvec;
                }
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

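/*
 * Illustrative sketch (not part of the original file): a V4L2 capture
 * driver typically selects this allocator by pointing its vb2_queue at
 * vb2_dma_contig_memops before calling vb2_queue_init(), e.g.:
 *
 *      q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *      q->io_modes = VB2_MMAP | VB2_DMABUF;
 *      q->dev = &pdev->dev;
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      ret = vb2_queue_init(q);
 *
 * "q" and "pdev" are placeholder names for the driver's queue and
 * platform device.
 */
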
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:        device for configuring DMA parameters
 * @size:       DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and provide access to shared buffers (either
 * USERPTR or DMABUF). This should be done before initializing the
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
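
/*
 * Illustrative sketch (not part of the original file): a driver on an
 * IOMMU-backed platform would typically raise the limit once at probe
 * time, before setting up its vb2_queue, e.g.:
 *
 *      ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *      if (ret)
 *              return ret;
 *
 * DMA_BIT_MASK(32) is a common "effectively unlimited" choice; any value
 * larger than the largest buffer the driver maps works as well. "pdev" is
 * a placeholder for the driver's platform device.
 */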

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");