// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt
#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
enum swiotlb_force swiotlb_force;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
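
/*
 * Parse the "swiotlb=" boot parameter as handled below: an optional slab
 * count, optionally followed by ",force" or ",noforce" to override the
 * bouncing policy.  For example "swiotlb=65536,force" (value illustrative
 * only) would reserve 65536 slabs and force all DMA through the bounce
 * buffer.
 */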
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem.nslabs ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
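
/*
 * Record the largest segment size callers can rely on not being bounced.
 * When SWIOTLB_FORCE is set every mapping is bounced anyway, so the value
 * is pinned to 1 rather than rounded down to a page multiple.
 */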
void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}
unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}
void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
}
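
/*
 * Helpers for slot arithmetic: the pool is carved into IO_TLB_SIZE
 * (1 << IO_TLB_SHIFT) byte slots, grouped into segments of IO_TLB_SEGSIZE
 * slots; io_tlb_offset() gives a slot's position within its segment and
 * nr_slots() converts a byte count into a slot count.
 */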
static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
/*
 * Remap swiotlb memory in the unencrypted physical address space
 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;

	memset(mem->vaddr, 0, bytes);
}
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;

	if (swiotlb_force == SWIOTLB_FORCE)
		mem->force_bounce = true;

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/* Get IO TLB memory from the low pages */
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free(tlb, bytes);
fail:
	pr_warn("Cannot allocate buffer");
}
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		return -ENOMEM;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}
void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}
/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
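
/*
 * Worked example (illustrative values only): a device with a min_align_mask
 * of 0xfff maps a buffer at physical address 0x12345.  The low bits 0x345
 * must be preserved, so swiotlb_align_offset() returns
 * 0x12345 & 0xfff & (IO_TLB_SIZE - 1) = 0x345 and the bounce buffer is
 * placed at that same offset within its slot.
 */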
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}
#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}
static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}
/*
 * Find a suitable number of IO TLB entries size that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
			      size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.  For allocations of
	 * PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}
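
/*
 * swiotlb_tbl_map_single() bounces @mapping_size bytes at @orig_addr into a
 * freshly allocated run of slots of at least @alloc_size bytes and returns
 * the physical address of the bounce buffer, or DMA_MAPPING_ERROR when no
 * suitable slots are available.
 */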
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
	 * to the tlb buffer, if we knew for sure the device will
	 * overwrite the entire current content. But we don't. Thus
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}
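
/*
 * Partial-sync helpers: the for_device variant copies the CPU's data into
 * the bounce buffer before the device reads it, the for_cpu variant copies
 * the device's writes back out of the bounce buffer before the CPU reads
 * them.
 */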
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}
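
/*
 * A single bounce allocation never spans more than one segment, so the
 * largest mapping is IO_TLB_SEGSIZE slots of IO_TLB_SIZE bytes each.  With
 * the usual values of 128 slots and 2 KiB per slot (from <linux/swiotlb.h>)
 * that works out to 256 KiB; treat the concrete figure as illustrative.
 */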
size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}
bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);
#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
	if (mem->nslabs) {
		mem->debugfs = debugfs_dir;
		swiotlb_create_debugfs_files(mem);
	}
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL

#ifdef CONFIG_DEBUG_FS
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
	struct io_tlb_mem *mem = rmem->priv;

	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
	swiotlb_create_debugfs_files(mem);
}
#else
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
}
#endif
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
				     GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		mem->force_bounce = true;
		mem->for_alloc = true;

		rmem->priv = mem;

		rmem_swiotlb_debugfs_init(rmem);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}
static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};
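
/*
 * Wire up reserved-memory nodes with compatible = "restricted-dma-pool".
 * A minimal sketch of such a device-tree node (name and addresses purely
 * illustrative):
 *
 *	restricted_dma: restricted-dma@50000000 {
 *		compatible = "restricted-dma-pool";
 *		reg = <0x50000000 0x4000000>;
 *	};
 *
 * Nodes marked "reusable", "no-map" or flagged as the default CMA/DMA pool
 * are rejected below.
 */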
static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */