// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
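
/*
 * Complain, once, about a mapping request the device cannot address,
 * and WARN so the offending caller leaves a backtrace in the log.
 */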
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev, "overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(1);
}
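
/*
 * Translate a physical address to a device address, skipping the
 * encryption bit for devices that have to use unencrypted memory.
 */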
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}
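
/*
 * Report the smallest mask that covers the highest physical page the
 * kernel may hand out, i.e. what the device actually needs to address.
 */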
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
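
/*
 * Pick GFP zone flags for an allocation that must fall below @dma_mask
 * (further clamped by the bus mask), and return the matching physical
 * address limit through @phys_mask.
 */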
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted(dev))
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
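
/* Is memory at @phys addressable with the device's coherent DMA mask? */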
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}
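
/*
 * Grab pages for a coherent allocation: try CMA first, then the page
 * allocator, restricting the GFP zone step by step until the pages are
 * actually addressable by the device.
 */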
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
	return page;
}
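
/*
 * Allocate and prepare a coherent buffer: set up the dma_handle, deal
 * with memory encryption and uncached segments, and zero the memory.
 * With DMA_ATTR_NO_KERNEL_MAPPING the struct page pointer itself is
 * returned as an opaque cookie instead of a kernel address.
 */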
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = uncached_kernel_address(ret);
	}

	return ret;
}

void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	dma_free_contiguous(dev, page, size);
}
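
/*
 * Undo dma_direct_alloc_pages: re-encrypt the memory if it was
 * decrypted, map an uncached alias back to the cached address, and
 * return the pages to CMA or the page allocator.
 */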
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		__dma_direct_free_pages(dev, size, cpu_addr);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		cpu_addr = cached_kernel_address(cpu_addr);
	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}
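
/*
 * Entry points for dma_alloc_coherent / dma_free_coherent: defer to the
 * architecture hooks when uncached memory is needed but no uncached
 * segment is available.
 */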
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
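
/*
 * "Sync for device" transfers buffer ownership to the device: any
 * swiotlb bounce buffer is filled first, then the architecture does
 * the required cache maintenance on non-coherent platforms.
 */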
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif
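
/*
 * "Sync for CPU" is the reverse: the architecture cache maintenance
 * runs first, then data is copied back out of any swiotlb bounce
 * buffer.
 */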
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
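
/*
 * Tear down a streaming mapping: sync for the CPU unless the caller
 * opted out, and release the swiotlb bounce buffer if one was used.
 */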
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
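
/*
 * A mapping can stay direct if the device can address it and swiotlb
 * was not forced on the command line.
 */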
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
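
/*
 * Map a scatterlist entry by entry, unwinding the already mapped
 * entries on the first failure so the caller sees all or nothing.
 */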
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}
	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);
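
/*
 * Map a physical resource (e.g. MMIO) for DMA: no cache maintenance
 * and no bouncing is possible here, so only the addressing check
 * remains.
 */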
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32. If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}