// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
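
/*
 * Tear down a virtual address range in SZ_4K steps through the
 * version-specific MMU ops. Both iova and size must be page aligned.
 */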
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
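
/*
 * Map a physically contiguous range at iova one SZ_4K page at a time,
 * unrolling the partial mapping again if any page fails to map.
 */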
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}
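
/*
 * Map all entries of a scatterlist into a contiguous range of GPU
 * virtual address space starting at iova.
 */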
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);

	return ret;
}
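
/*
 * Remove the pagetable entries covering a previously mapped scatterlist.
 */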
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}
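
/*
 * Unmap a GEM object's backing store and release its address space node.
 */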
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
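
/*
 * Find free GPU address space for a node, evicting unpinned mappings
 * through the drm_mm scan interface when the address space is full.
 */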
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
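
/*
 * Reserve the exact address range [va, va + size) in the context's
 * address space, as used for userspace-requested (softpin) addresses.
 */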
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
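
/*
 * Map a GEM object into the context. On MMUv1 a contiguous buffer that
 * fits into the linear window is used directly; otherwise address space
 * is allocated and the scatterlist is mapped through the pagetables.
 */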
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
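
/*
 * Drop a GEM mapping from the context, unless it has already been
 * reaped by the address space eviction path.
 */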
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
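
/*
 * Allocate a new MMU context for the given global MMU state and map the
 * cmdbuf suballocator into it.
 */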
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
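
/*
 * Get a GPU virtual address for the cmdbuf suballocator region,
 * refcounted through mapping->use. MMUv1 uses the linear window
 * directly, while MMUv2 maps the region read-only through the
 * pagetables.
 */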
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
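
/*
 * Set up the per-device MMU state shared by all contexts: bad page
 * backing, the MMUv2 pagetable array and the version-specific ops.
 */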
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}