// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
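
/*
 * Example (an illustrative sketch, not part of the original source): a
 * typical caller allocates a device buffer and releases it when done.
 * "card_dev" and the 64 KiB size are hypothetical.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card_dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	...	// buf.area = CPU address, buf.addr = DMA address
 *	snd_dma_free_pages(&buf);
 */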

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
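
/*
 * Example (an illustrative sketch): a caller that can live with a smaller
 * buffer checks dmab->bytes for the size actually obtained; the 1 MiB
 * request and "card_dev" are hypothetical.
 *
 *	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card_dev,
 *					   1024 * 1024, &buf);
 *	if (!err && buf.bytes < 1024 * 1024)
 *		dev_info(card_dev, "got only %zu bytes\n", buf.bytes);
 */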

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object at success, or NULL if failed
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
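
/*
 * Example (an illustrative sketch): with the devres variant there is no
 * explicit free; the buffer goes away with the device.  "pdev" is a
 * hypothetical platform device.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_TO_DEVICE, 256 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 */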

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
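
/*
 * Example (an illustrative sketch): a driver's mmap callback can simply
 * forward to this helper; "my_pcm_mmap" is hypothetical and
 * snd_pcm_get_dma_buf() returns the substream's snd_dma_buffer.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream),
 *					   area);
 *	}
 */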

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
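
/*
 * Example (an illustrative sketch): for buffers that need syncing, a
 * driver syncs toward the CPU before reading captured data and toward
 * the device after writing playback data; "dmab" is the buffer
 * allocated above.
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);    // before CPU reads
 *	...
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE); // before device reads
 */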

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
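
/*
 * Example (an illustrative sketch): walking a possibly non-contiguous
 * buffer in hardware-visible chunks, e.g. to program a descriptor list;
 * "remain" and program_descriptor() are hypothetical.
 *
 *	unsigned int ofs = 0, remain = dmab->bytes, chunk;
 *
 *	while (remain) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, remain);
 *		program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		remain -= chunk;
 *	}
 */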

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	/* retry with a narrower zone if the address exceeds the DMA mask */
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if the allocation fails, fall back to the normal page allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

#ifdef CONFIG_SND_DMA_SGBUF
	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
	if (!sgt && !get_dma_ops(dmab->dev.dev))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
	dma_addr_t *addrs;
};
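
/*
 * Illustrative note on the addrs[] encoding (assuming 4 KiB pages): each
 * entry keeps the page's DMA address in the PAGE_MASK bits; the entry for
 * the first page of a chunk additionally carries the chunk's page count
 * in the low ~PAGE_MASK bits.  E.g. a 4-page chunk at DMA address 0x10000
 * would be recorded as
 *
 *	addrs[0] = 0x10000 | 4;	// first page: address + #pages
 *	addrs[1] = 0x11000;	// following pages: address only
 *	...
 *
 * so the free path can recover the chunk boundaries from addrs[] alone.
 */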

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	/* correct the type */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}