/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	spinlock_t		msi_lock;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return -ENOMEM;

	spin_lock_init(&cookie->msi_lock);
	INIT_LIST_HEAD(&cookie->msi_page_list);
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
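
/*
 * Illustrative sketch (not built, hypothetical "example_" names): an IOMMU
 * driver would typically acquire the cookie from its domain_alloc callback
 * when asked for a DMA-API-managed domain, roughly as below.
 */
#if 0
static struct iommu_domain *example_domain_alloc(unsigned type)
{
	/* A real driver embeds iommu_domain inside its own domain structure */
	struct iommu_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);

	if (!dom)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(dom)) {
		kfree(dom);
		return NULL;
	}
	return dom;
}
#endif
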
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;
	if (!cookie)
		return;
	if (cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);
	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
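
/*
 * Illustrative sketch (not built, hypothetical name): the matching
 * domain_free callback releases the cookie before freeing the domain.
 */
#if 0
static void example_domain_free(struct iommu_domain *dom)
{
	iommu_put_dma_cookie(dom);
	kfree(dom);
}
#endif
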
static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
		if (dev && dev_is_pci(dev))
			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
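
/*
 * Illustrative sketch (not built, hypothetical name): arch code wiring a
 * device up to a DMA domain would typically size the IOVA space from the
 * device's DMA mask and addressable range, roughly as below; see
 * arch/arm64/mm/dma-mapping.c for a real consumer of this interface.
 */
#if 0
static int example_setup_dma_domain(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev))
		return -EINVAL;
	return 0;
}
#endif
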
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
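
/*
 * Illustrative sketch (not built, hypothetical names): a non-coherent arch
 * dma_map_ops .alloc implementation would pair iommu_dma_alloc() with a
 * cache-flushing callback, remap the returned page array for the CPU, and
 * later undo everything with iommu_dma_free(). Error handling is trimmed.
 */
#if 0
static void example_flush_page(struct device *dev, const void *virt,
			       phys_addr_t phys)
{
	/* Arch-specific: make these PAGE_SIZE bytes visible to the device */
}

static struct page **example_alloc(struct device *dev, size_t size, gfp_t gfp,
				   unsigned long attrs, dma_addr_t *handle)
{
	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);

	return iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
			       example_flush_page);
}
#endif
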
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;
	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
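
/*
 * Illustrative sketch (not built, hypothetical name): an arch .mmap
 * implementation is expected to validate the requested region and set up
 * vma->vm_page_prot itself before handing the page array over.
 */
#if 0
static int example_mmap(struct vm_area_struct *vma, struct page **pages,
			size_t size, pgprot_t prot)
{
	if (vma->vm_pgoff >= (PAGE_ALIGN(size) >> PAGE_SHIFT))
		return -ENXIO;

	vma->vm_page_prot = prot;
	return iommu_dma_mmap(pages, size, vma);
}
#endif
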
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
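
/*
 * Illustrative sketch (not built, hypothetical names): arch dma_map_ops for
 * streaming DMA are thin wrappers that translate the DMA direction into
 * IOMMU prot flags and add any non-coherent cache maintenance themselves.
 */
#if 0
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = false;	/* arch-specific per-device knowledge */
	int prot = dma_direction_to_prot(dir, coherent);

	/* A non-coherent arch would clean/invalidate caches around this */
	return iommu_dma_map_page(dev, page, offset, size, prot);
}

static void example_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	iommu_dma_unmap_page(dev, handle, size, dir, attrs);
}
#endif
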
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}
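
/*
 * Illustrative sketch (not built, hypothetical names): the scatterlist
 * entry points slot into arch dma_map_ops the same way as the page ones,
 * again with the direction-to-prot translation done by the wrapper.
 */
#if 0
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return iommu_dma_map_sg(dev, sgl, nents,
				dma_direction_to_prot(dir, false));
}

static void example_unmap_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	iommu_dma_unmap_sg(dev, sgl, nents, dir, attrs);
}
#endif
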
int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = &cookie->iovad;
	struct iova *iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;
	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
	if (!iova)
		goto out_free_page;
	msi_page->phys = msi_addr;
	msi_page->iova = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
		goto out_free_iova;
	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	__free_iova(iovad, iova);
out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= iova_mask(&cookie->iovad);
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
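
/*
 * Illustrative sketch (not built): an MSI irqchip's compose callback calls
 * iommu_dma_map_msi_msg() after filling in the physical doorbell address,
 * so that the address is transparently rewritten to its mapped IOVA when
 * the device sits behind an IOMMU. The callback name and doorbell value
 * here are hypothetical; see the GIC MSI irqchips for real callers.
 */
#if 0
static void example_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	phys_addr_t doorbell = 0x10000000;	/* hypothetical doorbell */

	msg->address_hi = upper_32_bits(doorbell);
	msg->address_lo = lower_32_bits(doorbell);
	msg->data = data->hwirq;

	iommu_dma_map_msi_msg(data->irq, msg);
}
#endif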