// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
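
/*
 * A job and all of its variable-length arrays (relocations, unpin records,
 * command descriptors and the physical-address tables) are carved out of a
 * single kzalloc() block; the total size is computed in 64-bit arithmetic
 * first so that a hostile num_cmdbufs/num_relocs pair cannot overflow it.
 */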
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
                                    u32 num_cmdbufs, u32 num_relocs,
                                    bool skip_firewall)
{
        struct host1x_job *job = NULL;
        unsigned int num_unpins = num_relocs;
        bool enable_firewall;
        u64 total;
        void *mem;

        enable_firewall = IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !skip_firewall;

        /* gathers are only pinned (and hence unpinned) without the firewall */
        if (!enable_firewall)
                num_unpins += num_cmdbufs;

        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
                (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
                (u64)num_cmdbufs * sizeof(struct host1x_job_cmd) +
                (u64)num_unpins * sizeof(dma_addr_t) +
                (u64)num_unpins * sizeof(u32 *);
        if (total > ULONG_MAX)
                return NULL;

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
                return NULL;

        job->enable_firewall = enable_firewall;

        kref_init(&job->ref);
        job->channel = ch;

        /* Redistribute memory to the structs */
        mem += sizeof(struct host1x_job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(struct host1x_reloc);
        job->unpins = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(struct host1x_job_unpin_data);
        job->cmds = num_cmdbufs ? mem : NULL;
        mem += num_cmdbufs * sizeof(struct host1x_job_cmd);
        job->addr_phys = num_unpins ? mem : NULL;

        job->reloc_addr_phys = job->addr_phys;
        job->gather_addr_phys = &job->addr_phys[num_relocs];

        return job;
}
EXPORT_SYMBOL(host1x_job_alloc);
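
/*
 * Typical lifecycle from a client driver's point of view (a sketch, not
 * verbatim driver code; submission itself happens via host1x_job_submit()
 * in the channel code, not in this file):
 *
 *      job = host1x_job_alloc(channel, num_cmdbufs, num_relocs, skip_firewall);
 *      host1x_job_add_gather(job, bo, num_words, offset);
 *      err = host1x_job_pin(job, client->dev);
 *      err = host1x_job_submit(job);
 *      host1x_job_put(job);
 */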

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
        kref_get(&job->ref);
        return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);

        if (job->release)
                job->release(job);

        if (job->waiter)
                host1x_intr_put_ref(job->syncpt->host, job->syncpt->id,
                                    job->waiter, false);

        if (job->syncpt)
                host1x_syncpt_put(job->syncpt);

        kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
        kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);
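
/*
 * job->cmds holds gathers and syncpoint waits in submission order; the
 * is_wait flag on each entry tells the pinning and submission paths which
 * union member is valid.
 */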
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset)
{
        struct host1x_job_gather *gather = &job->cmds[job->num_cmds].gather;

        gather->words = words;
        gather->bo = bo;
        gather->offset = offset;

        job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
                         bool relative, u32 next_class)
{
        struct host1x_job_cmd *cmd = &job->cmds[job->num_cmds];

        cmd->is_wait = true;
        cmd->wait.id = id;
        cmd->wait.threshold = thresh;
        cmd->wait.next_class = next_class;
        cmd->wait.relative = relative;

        job->num_cmds++;
}
EXPORT_SYMBOL(host1x_job_add_wait);
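
/*
 * Pin every buffer the job references: first the relocation targets, mapped
 * into the client device with a DMA direction derived from the reloc flags,
 * then (unless the firewall will copy them anyway) the gather buffers,
 * mapped read-only for host1x itself. Every successful mapping is recorded
 * in job->unpins so that host1x_job_unpin() can unwind it on failure.
 */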
static int pin_job(struct host1x *host, struct host1x_job *job)
{
        unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
        struct host1x_job_gather *g;
        unsigned int i;
        int err;

        job->num_unpins = 0;

        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                enum dma_data_direction direction;
                struct host1x_bo_mapping *map;
                struct host1x_bo *bo;

                reloc->target.bo = host1x_bo_get(reloc->target.bo);
                if (!reloc->target.bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                bo = reloc->target.bo;

                switch (reloc->flags & mask) {
                case HOST1X_RELOC_READ:
                        direction = DMA_TO_DEVICE;
                        break;

                case HOST1X_RELOC_WRITE:
                        direction = DMA_FROM_DEVICE;
                        break;

                case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
                        direction = DMA_BIDIRECTIONAL;
                        break;

                default:
                        err = -EINVAL;
                        goto unpin;
                }

                map = host1x_bo_pin(dev, bo, direction, NULL);
                if (IS_ERR(map)) {
                        err = PTR_ERR(map);
                        goto unpin;
                }

                /*
                 * host1x clients are generally not able to do scatter-gather themselves, so fail
                 * if the buffer is discontiguous and we fail to map its SG table to a single
                 * contiguous chunk of I/O virtual memory.
                 */
                if (map->chunks > 1) {
                        err = -EINVAL;
                        goto unpin;
                }

                job->addr_phys[job->num_unpins] = map->phys;
                job->unpins[job->num_unpins].map = map;
                job->num_unpins++;
        }

        /*
         * The firewall will copy the gather BOs' contents later, so there is
         * no need to hold on to and pin them here.
         */
        if (job->enable_firewall)
                return 0;

        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_bo_mapping *map;
                size_t gather_size = 0;
                struct scatterlist *sg;
                unsigned long shift;
                struct iova *alloc;
                unsigned int j;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, NULL);
                if (IS_ERR(map)) {
                        err = PTR_ERR(map);
                        goto unpin;
                }

                if (host->domain) {
                        for_each_sgtable_sg(map->sgt, sg, j)
                                gather_size += sg->length;

                        gather_size = iova_align(&host->iova, gather_size);

                        shift = iova_shift(&host->iova);
                        alloc = alloc_iova(&host->iova, gather_size >> shift,
                                           host->iova_end >> shift, true);
                        if (!alloc) {
                                err = -ENOMEM;
                                goto put;
                        }

                        err = iommu_map_sgtable(host->domain,
                                                iova_dma_addr(&host->iova, alloc),
                                                map->sgt, IOMMU_READ);
                        if (err <= 0) {
                                __free_iova(&host->iova, alloc);
                                if (err == 0)
                                        err = -EINVAL;
                                goto put;
                        }

                        map->phys = iova_dma_addr(&host->iova, alloc);
                        map->size = gather_size;
                }

                job->addr_phys[job->num_unpins] = map->phys;
                job->unpins[job->num_unpins].map = map;
                job->num_unpins++;

                job->gather_addr_phys[i] = map->phys;
        }

        return 0;

put:
        host1x_bo_put(g->bo);
unpin:
        host1x_job_unpin(job);
        return err;
}
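
/*
 * Patch the command stream of a single gather: every relocation that points
 * into this gather's buffer object is overwritten with the DMA address that
 * pin_job() resolved for its target.
 */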
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
        void *cmdbuf_addr = NULL;
        struct host1x_bo *cmdbuf = g->bo;
        unsigned int i;

        /* pin & patch the relocs for one gather */
        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                u32 reloc_addr = (job->reloc_addr_phys[i] +
                                  reloc->target.offset) >> reloc->shift;
                u32 *target;

                /* skip all other gathers */
                if (cmdbuf != reloc->cmdbuf.bo)
                        continue;

                if (job->enable_firewall) {
                        target = (u32 *)job->gather_copy_mapped +
                                        reloc->cmdbuf.offset / sizeof(u32) +
                                                g->offset / sizeof(u32);
                        goto patch_reloc;
                }

                if (!cmdbuf_addr) {
                        cmdbuf_addr = host1x_bo_mmap(cmdbuf);

                        if (unlikely(!cmdbuf_addr)) {
                                pr_err("Could not map cmdbuf for relocation\n");
                                return -ENOMEM;
                        }
                }

                target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
                *target = reloc_addr;
        }

        if (cmdbuf_addr)
                host1x_bo_munmap(cmdbuf, cmdbuf_addr);

        return 0;
}
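
/*
 * The command firewall below re-validates a copied command stream before it
 * reaches the hardware: every write to an address register must be backed by
 * a matching relocation, so userspace cannot point an engine at arbitrary
 * memory.
 */
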
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
                        unsigned int offset)
{
        offset *= sizeof(u32);

        if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
                return false;

        /* relocation shift value validation isn't implemented yet */
        if (reloc->shift)
                return false;

        return true;
}
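
/*
 * Parser state for one validation pass: a cursor into the copied command
 * stream (cmdbuf, offset, words) plus the decoded fields of the current
 * opcode (class, reg, mask, count) and the relocation table it consumes.
 */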
struct host1x_firewall {
        struct host1x_job *job;
        struct device *dev;

        unsigned int num_relocs;
        struct host1x_reloc *reloc;

        struct host1x_bo *cmdbuf;
        unsigned int offset;

        u32 words;
        u32 class;
        u32 reg;
        u32 mask;
        u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
        if (!fw->job->is_addr_reg)
                return 0;

        if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
                if (!fw->num_relocs)
                        return -EINVAL;

                if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
                        return -EINVAL;

                fw->num_relocs--;
                fw->reloc++;
        }

        return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
        if (!fw->job->is_valid_class) {
                if (fw->class != class)
                        return -EINVAL;
        } else {
                if (!fw->job->is_valid_class(fw->class))
                        return -EINVAL;
        }

        return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
        u32 mask = fw->mask;
        u32 reg = fw->reg;
        int ret;

        while (mask) {
                if (fw->words == 0)
                        return -EINVAL;

                if (mask & 1) {
                        ret = check_register(fw, reg);
                        if (ret < 0)
                                return ret;

                        fw->words--;
                        fw->offset++;
                }
                mask >>= 1;
                reg++;
        }

        return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        u32 reg = fw->reg;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, reg);
                if (ret < 0)
                        return ret;

                reg++;
                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, fw->reg);
                if (ret < 0)
                        return ret;

                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}
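
/*
 * validate() decodes the copied stream one word at a time. The opcode lives
 * in bits 31:28 of each command word; for opcode 0 (set class) the class ID
 * sits in bits 15:6 with a register mask in bits 5:0, while the incrementing
 * and non-incrementing write opcodes carry a register offset in bits 27:16
 * and a word count in bits 15:0, matching the shifts used below.
 */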
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
        u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
                (g->offset / sizeof(u32));
        u32 job_class = fw->class;
        int err = 0;

        fw->words = g->words;
        fw->cmdbuf = g->bo;
        fw->offset = 0;

        while (fw->words && !err) {
                u32 word = cmdbuf_base[fw->offset];
                u32 opcode = (word & 0xf0000000) >> 28;

                fw->mask = 0;
                fw->reg = 0;
                fw->count = 0;
                fw->words--;
                fw->offset++;

                switch (opcode) {
                case 0: /* set class */
                        fw->class = word >> 6 & 0x3ff;
                        fw->mask = word & 0x3f;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_class(fw, job_class);
                        if (!err)
                                err = check_mask(fw);
                        break;
                case 1: /* incrementing write */
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_incr(fw);
                        break;
                case 2: /* non-incrementing write */
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_nonincr(fw);
                        break;
                case 3: /* masked write */
                        fw->mask = word & 0xffff;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_mask(fw);
                        break;
                case 4:
                case 14:
                        break;
                default:
                        err = -EINVAL;
                        break;
                }
        }

        return err;
}
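
/*
 * With the firewall enabled, gathers are copied into a device-owned
 * write-combined buffer and validated there. Validating the copy rather
 * than the userspace-visible BO closes the window in which userspace could
 * rewrite the stream after it has been checked.
 */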
static inline int copy_gathers(struct device *host, struct host1x_job *job,
                               struct device *dev)
{
        struct host1x_firewall fw;
        size_t size = 0;
        size_t offset = 0;
        unsigned int i;

        fw.job = job;
        fw.dev = dev;
        fw.reloc = job->relocs;
        fw.num_relocs = job->num_relocs;
        fw.class = job->class;

        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                size += g->words * sizeof(u32);
        }

        /*
         * Try a non-blocking allocation from a higher-priority pool first,
         * as waiting for the allocation here would be a major performance
         * hit.
         */
        job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
                                               GFP_NOWAIT);

        /* the higher-priority allocation failed, try the generic, blocking one */
        if (!job->gather_copy_mapped)
                job->gather_copy_mapped = dma_alloc_wc(host, size,
                                                       &job->gather_copy,
                                                       GFP_KERNEL);
        if (!job->gather_copy_mapped)
                return -ENOMEM;

        job->gather_copy_size = size;

        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;
                void *gather;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                /* Copy the gather */
                gather = host1x_bo_mmap(g->bo);
                memcpy(job->gather_copy_mapped + offset, gather + g->offset,
                       g->words * sizeof(u32));
                host1x_bo_munmap(g->bo, gather);

                /* Store the location in the buffer */
                g->base = job->gather_copy;
                g->offset = offset;

                /* Validate the job */
                if (validate(&fw, g))
                        return -EINVAL;

                offset += g->words * sizeof(u32);
        }

        /* No relocs should remain at this point */
        if (fw.num_relocs)
                return -EINVAL;

        return 0;
}
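
/*
 * Resolve everything a job needs before submission: pin all buffers, copy
 * and validate the gathers when the firewall is enabled, deduplicate
 * gathers that share a buffer object, and patch in the relocations.
 */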
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
        int err;
        unsigned int i, j;
        struct host1x *host = dev_get_drvdata(dev->parent);

        /* pin memory */
        err = pin_job(host, job);
        if (err)
                goto out;

        if (job->enable_firewall) {
                err = copy_gathers(host->dev, job, dev);
                if (err)
                        goto out;
        }

        /* patch gathers */
        for (i = 0; i < job->num_cmds; i++) {
                struct host1x_job_gather *g;

                if (job->cmds[i].is_wait)
                        continue;

                g = &job->cmds[i].gather;

                /* process each gather mem only once */
                if (g->handled)
                        continue;

                /* copy_gathers() sets gathers base if firewall is enabled */
                if (!job->enable_firewall)
                        g->base = job->gather_addr_phys[i];

                for (j = i + 1; j < job->num_cmds; j++) {
                        if (!job->cmds[j].is_wait &&
                            job->cmds[j].gather.bo == g->bo) {
                                job->cmds[j].gather.handled = true;
                                job->cmds[j].gather.base = g->base;
                        }
                }

                err = do_relocs(job, g);
                if (err)
                        break;
        }

out:
        if (err)
                host1x_job_unpin(job);
        wmb();

        return err;
}
EXPORT_SYMBOL(host1x_job_pin);
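
/*
 * Undo pin_job(): tear down any IOMMU mappings created for the gathers,
 * release every recorded buffer mapping and reference, and free the
 * firewall's gather copy if one was allocated.
 */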
void host1x_job_unpin(struct host1x_job *job)
{
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        unsigned int i;

        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_bo_mapping *map = job->unpins[i].map;
                struct host1x_bo *bo = map->bo;

                if (!job->enable_firewall && map->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i], map->size);
                        free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
                }

                host1x_bo_unpin(map);
                host1x_bo_put(bo);
        }

        job->num_unpins = 0;

        if (job->gather_copy_size)
                dma_free_wc(host->dev, job->gather_copy_size,
                            job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
        dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
        dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
        dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
        dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
        dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
        dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}