/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
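
/*
 * Illustrative example (not part of this header's API): attributes are
 * passed as a bitmask in the "attrs" argument of the *_attrs() mapping
 * helpers. A driver that handles cache maintenance itself and does not
 * want allocation-failure warnings might do:
 *
 *	addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				    DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_WARN);
 *
 * "buf" and "len" are hypothetical driver-local variables.
 */
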
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle,
                        unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t,
                        unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                        dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction direction);
        int (*dma_supported)(struct device *dev, u64 mask);
        u64 (*get_required_mask)(struct device *dev);
        size_t (*max_mapping_size)(struct device *dev);
        unsigned long (*get_merge_boundary)(struct device *dev);
};
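
/*
 * Illustrative sketch (hypothetical, not part of the kernel): a bus or
 * IOMMU layer provides an instance of this structure and attaches it to a
 * device with set_dma_ops(); hooks it does not implement can be left NULL:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_alloc,
 *		.free		= my_bus_free,
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 *
 * The my_bus_* callbacks are placeholders for functions with the matching
 * prototypes above.
 */
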
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
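/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL and
 * DMA_BIT_MASK(64) to ~0ULL; the special case for 64 avoids the undefined
 * behaviour of shifting a 64-bit value by 64 bits.
 */
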
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle)
{
        return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                void *cpu_addr, size_t size,
                int *ret)
{
        return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
        return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call
 * bypass, and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr);

        return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_unmap_page(dev, addr, size, dir, attrs);
        else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_is_direct(ops))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
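
/*
 * Illustrative sketch (not from the original header): a typical streaming
 * scatter-gather mapping checks the returned entry count and walks the list
 * with for_each_sg(); "sglist" and "count" are hypothetical driver state:
 *
 *	struct scatterlist *s;
 *	int i, nents = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
 *
 *	if (!nents)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, nents, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, count, DMA_TO_DEVICE);
 *
 * Note that the unmap call takes the original "count", not the value
 * returned by dma_map_sg(); program_hw_descriptor() is a placeholder.
 */
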
static inline dma_addr_t dma_map_resource(struct device *dev,
                                          phys_addr_t phys_addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

        if (dma_is_direct(ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_is_direct(ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}
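
/*
 * Illustrative sketch (not from the original header): dma_map_resource() is
 * intended for MMIO/physical resources such as a peripheral FIFO register
 * used as a slave-DMA target, never for RAM. "fifo_phys" is a hypothetical
 * physical address taken from the peripheral's resource:
 *
 *	dma_addr_t dma = dma_map_resource(dev, fifo_phys, 4,
 *					  DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, 4, DMA_FROM_DEVICE, 0);
 */
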
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
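
/*
 * Illustrative sketch (not from the original header): for a long-lived
 * streaming mapping, the sync calls transfer ownership of the buffer back
 * and forth between CPU and device instead of remapping it each time;
 * "buf", "len" and "addr" are hypothetical driver state and
 * process_rx_data() is a placeholder:
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf, len);	// CPU may touch the buffer here
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	// the device may now DMA into the buffer again
 */
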
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (dma_addr == DMA_MAPPING_ERROR)
                return -ENOMEM;
        return 0;
}
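
/*
 * Illustrative example (not from the original header): every streaming
 * mapping must be checked before the handle is used, since failure is
 * reported through DMA_MAPPING_ERROR rather than a NULL-like value;
 * "skb->data" here is just a stand-in for any kernel-virtual buffer:
 *
 *	dma_addr_t addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */
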
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
                struct scatterlist *sg, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
                phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
                struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                size_t size, unsigned long attrs)
{
        return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
        return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
        return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
        return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
        return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
        return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on areas that might be remapped. */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL on error during mapping the buffer.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents <= 0)
                return -EINVAL;
        sgt->nents = nents;
        return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
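
/*
 * Illustrative sketch (not from the original header) of the sgtable helper
 * lifecycle; "sgt" is a hypothetical sg_table already populated, e.g. via
 * sg_alloc_table_from_pages():
 *
 *	if (dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0))
 *		return -EINVAL;
 *	...
 *	dma_sync_sgtable_for_cpu(dev, &sgt, DMA_BIDIRECTIONAL);
 *	// the CPU may read or modify the buffer here
 *	dma_sync_sgtable_for_device(dev, &sgt, DMA_BIDIRECTIONAL);
 *	...
 *	dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
 */
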
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
                pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
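
/*
 * Illustrative example (not from the original header): coherent allocations
 * return both a kernel virtual address and the matching device handle, and
 * must be released with the same size, typically once at probe/remove time:
 *
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 *
 * RING_SIZE and ring_dma (a dma_addr_t) are hypothetical driver state.
 */
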
static inline u64 dma_get_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);

        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}
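
/*
 * Illustrative example (not from the original header): a driver usually
 * negotiates its addressing capability once in probe(), falling back to a
 * narrower mask if the platform rejects the wide one:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 *
 * "pdev" stands for the probed device (e.g. a struct pci_dev).
 */
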
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
        return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
                        dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size)
{
        return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)