drivers/gpu/host1x/job.c (GNU Linux-libre 5.10.219-gnu1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

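/*
 * Allocate a job along with its relocation, unpin, gather and DMA address
 * tables in a single contiguous allocation, sized for num_cmdbufs gathers
 * and num_relocs relocations. Returns NULL if the total size would overflow
 * or the allocation fails.
 */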
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
                                    u32 num_cmdbufs, u32 num_relocs)
{
        struct host1x_job *job = NULL;
        unsigned int num_unpins = num_relocs;
        u64 total;
        void *mem;

        if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                num_unpins += num_cmdbufs;

        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
                (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
                (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
                (u64)num_unpins * sizeof(dma_addr_t) +
                (u64)num_unpins * sizeof(u32 *);
        if (total > ULONG_MAX)
                return NULL;

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
                return NULL;

        kref_init(&job->ref);
        job->channel = ch;

        /* Redistribute memory to the structs */
        mem += sizeof(struct host1x_job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(struct host1x_reloc);
        job->unpins = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(struct host1x_job_unpin_data);
        job->gathers = num_cmdbufs ? mem : NULL;
        mem += num_cmdbufs * sizeof(struct host1x_job_gather);
        job->addr_phys = num_unpins ? mem : NULL;

        job->reloc_addr_phys = job->addr_phys;
        job->gather_addr_phys = &job->addr_phys[num_relocs];

        return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
        kref_get(&job->ref);
        return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);

        kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
        kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

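/*
 * Append a gather to the job: a command buffer BO plus the number of words
 * to fetch and the offset to start fetching at. The gather table must have
 * been sized for this entry in host1x_job_alloc().
 */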
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset)
{
        struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

        gather->words = words;
        gather->bo = bo;
        gather->offset = offset;

        job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

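/*
 * Pin and map every buffer referenced by the job: relocation targets are
 * mapped for the client device and gather buffers for the host1x device
 * itself (skipped when the firewall is enabled, since the gathers are copied
 * instead). The resulting DMA addresses are stored in job->addr_phys and the
 * state needed to undo the pinning is recorded in job->unpins.
 */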
static int pin_job(struct host1x *host, struct host1x_job *job)
{
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
        struct host1x_job_gather *g;
        struct iommu_domain *domain;
        unsigned int i;
        int err;

        domain = iommu_get_domain_for_dev(dev);
        job->num_unpins = 0;

        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                dma_addr_t phys_addr, *phys;
                struct sg_table *sgt;

                reloc->target.bo = host1x_bo_get(reloc->target.bo);
                if (!reloc->target.bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the client device is not attached to an IOMMU, the
                 * physical address of the buffer object can be used.
                 *
                 * Similarly, when an IOMMU domain is shared between all
                 * host1x clients, the IOVA is already available, so no
                 * need to map the buffer object again.
                 *
                 * XXX Note that this isn't always safe to do because it
                 * relies on an assumption that no cache maintenance is
                 * needed on the buffer objects.
                 */
                if (!domain || client->group)
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto unpin;
                }

                if (sgt) {
                        unsigned long mask = HOST1X_RELOC_READ |
                                             HOST1X_RELOC_WRITE;
                        enum dma_data_direction dir;

                        switch (reloc->flags & mask) {
                        case HOST1X_RELOC_READ:
                                dir = DMA_TO_DEVICE;
                                break;

                        case HOST1X_RELOC_WRITE:
                                dir = DMA_FROM_DEVICE;
                                break;

                        case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
                                dir = DMA_BIDIRECTIONAL;
                                break;

                        default:
                                err = -EINVAL;
                                goto unpin;
                        }

                        err = dma_map_sgtable(dev, sgt, dir, 0);
                        if (err)
                                goto unpin;

                        job->unpins[job->num_unpins].dev = dev;
                        job->unpins[job->num_unpins].dir = dir;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->unpins[job->num_unpins].bo = reloc->target.bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        /*
         * The contents of the gather BOs will be copied later (see
         * copy_gathers()), so there is no need to hold and pin them here.
         */
        if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                return 0;

        for (i = 0; i < job->num_gathers; i++) {
                size_t gather_size = 0;
                struct scatterlist *sg;
                struct sg_table *sgt;
                dma_addr_t phys_addr;
                unsigned long shift;
                struct iova *alloc;
                dma_addr_t *phys;
                unsigned int j;

                g = &job->gathers[i];
                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the host1x is not attached to an IOMMU, there is no need
                 * to map the buffer object for the host1x, since the physical
                 * address can simply be used.
                 */
                if (!iommu_get_domain_for_dev(host->dev))
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(host->dev, g->bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto put;
                }

                if (host->domain) {
                        for_each_sgtable_sg(sgt, sg, j)
                                gather_size += sg->length;
                        gather_size = iova_align(&host->iova, gather_size);

                        shift = iova_shift(&host->iova);
                        alloc = alloc_iova(&host->iova, gather_size >> shift,
                                           host->iova_end >> shift, true);
                        if (!alloc) {
                                err = -ENOMEM;
                                goto put;
                        }

                        err = iommu_map_sgtable(host->domain,
                                        iova_dma_addr(&host->iova, alloc),
                                        sgt, IOMMU_READ);
                        if (err == 0) {
                                __free_iova(&host->iova, alloc);
                                err = -EINVAL;
                                goto put;
                        }

                        job->unpins[job->num_unpins].size = gather_size;
                        phys_addr = iova_dma_addr(&host->iova, alloc);
                } else if (sgt) {
                        err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
                        if (err)
                                goto put;

                        job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
                        job->unpins[job->num_unpins].dev = host->dev;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->gather_addr_phys[i] = phys_addr;

                job->unpins[job->num_unpins].bo = g->bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        return 0;

put:
        host1x_bo_put(g->bo);
unpin:
        host1x_job_unpin(job);
        return err;
}

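/*
 * Patch every relocation that targets the given gather's command buffer,
 * writing the (shifted) DMA address of the relocation target into the
 * command stream. With the firewall enabled the patch is applied to the
 * gather copy, otherwise the command buffer BO is mapped and written
 * directly.
 */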
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
        void *cmdbuf_addr = NULL;
        struct host1x_bo *cmdbuf = g->bo;
        unsigned int i;

        /* pin & patch the relocs for one gather */
        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                u32 reloc_addr = (job->reloc_addr_phys[i] +
                                  reloc->target.offset) >> reloc->shift;
                u32 *target;

                /* skip all other gathers */
                if (cmdbuf != reloc->cmdbuf.bo)
                        continue;

                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
                        target = (u32 *)job->gather_copy_mapped +
                                        reloc->cmdbuf.offset / sizeof(u32) +
                                                g->offset / sizeof(u32);
                        goto patch_reloc;
                }

                if (!cmdbuf_addr) {
                        cmdbuf_addr = host1x_bo_mmap(cmdbuf);

                        if (unlikely(!cmdbuf_addr)) {
                                pr_err("Could not map cmdbuf for relocation\n");
                                return -ENOMEM;
                        }
                }

                target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
                *target = reloc_addr;
        }

        if (cmdbuf_addr)
                host1x_bo_munmap(cmdbuf, cmdbuf_addr);

        return 0;
}

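/*
 * Check that the next relocation entry matches the command buffer and word
 * offset currently being inspected by the firewall.
 */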
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
                        unsigned int offset)
{
        offset *= sizeof(u32);

        if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
                return false;

        /* relocation shift value validation isn't implemented yet */
        if (reloc->shift)
                return false;

        return true;
}

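/* State tracked by the command stream firewall while it walks a job. */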
struct host1x_firewall {
        struct host1x_job *job;
        struct device *dev;

        unsigned int num_relocs;
        struct host1x_reloc *reloc;

        struct host1x_bo *cmdbuf;
        unsigned int offset;

        u32 words;
        u32 class;
        u32 reg;
        u32 mask;
        u32 count;
};

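/*
 * If the register at the given offset is an address register for the current
 * class, the write to it must be covered by the next relocation entry; that
 * entry is consumed on success.
 */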
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
        if (!fw->job->is_addr_reg)
                return 0;

        if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
                if (!fw->num_relocs)
                        return -EINVAL;

                if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
                        return -EINVAL;

                fw->num_relocs--;
                fw->reloc++;
        }

        return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
        if (!fw->job->is_valid_class) {
                if (fw->class != class)
                        return -EINVAL;
        } else {
                if (!fw->job->is_valid_class(fw->class))
                        return -EINVAL;
        }

        return 0;
}

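/*
 * The following three helpers walk the register writes produced by a
 * SETCLASS/MASK, INCR or NONINCR opcode and run check_register() on each
 * written register, consuming command stream words as they go.
 */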
static int check_mask(struct host1x_firewall *fw)
{
        u32 mask = fw->mask;
        u32 reg = fw->reg;
        int ret;

        while (mask) {
                if (fw->words == 0)
                        return -EINVAL;

                if (mask & 1) {
                        ret = check_register(fw, reg);
                        if (ret < 0)
                                return ret;

                        fw->words--;
                        fw->offset++;
                }
                mask >>= 1;
                reg++;
        }

        return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        u32 reg = fw->reg;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, reg);
                if (ret < 0)
                        return ret;

                reg++;
                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, fw->reg);
                if (ret < 0)
                        return ret;

                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

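/*
 * Walk one gather's copied command stream word by word, decoding the host1x
 * opcodes (SETCLASS, INCR, NONINCR, MASK, IMM, EXTEND), verifying class
 * changes and checking every write to an address register against the job's
 * relocation table. Unknown opcodes abort validation.
 */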
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
        u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
                (g->offset / sizeof(u32));
        u32 job_class = fw->class;
        int err = 0;

        fw->words = g->words;
        fw->cmdbuf = g->bo;
        fw->offset = 0;

        while (fw->words && !err) {
                u32 word = cmdbuf_base[fw->offset];
                u32 opcode = (word & 0xf0000000) >> 28;

                fw->mask = 0;
                fw->reg = 0;
                fw->count = 0;
                fw->words--;
                fw->offset++;

                switch (opcode) {
                case 0:
                        fw->class = word >> 6 & 0x3ff;
                        fw->mask = word & 0x3f;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_class(fw, job_class);
                        if (!err)
                                err = check_mask(fw);
                        if (err)
                                goto out;
                        break;
                case 1:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_incr(fw);
                        if (err)
                                goto out;
                        break;

                case 2:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_nonincr(fw);
                        if (err)
                                goto out;
                        break;

                case 3:
                        fw->mask = word & 0xffff;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_mask(fw);
                        if (err)
                                goto out;
                        break;
                case 4:
                case 14:
                        break;
                default:
                        err = -EINVAL;
                        break;
                }
        }

out:
        return err;
}

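/*
 * With the firewall enabled, copy all gather command buffers into a single
 * write-combined allocation and run the firewall (validate()) over each
 * copy, so the hardware only ever fetches validated commands.
 */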
static inline int copy_gathers(struct device *host, struct host1x_job *job,
                               struct device *dev)
{
        struct host1x_firewall fw;
        size_t size = 0;
        size_t offset = 0;
        unsigned int i;

        fw.job = job;
        fw.dev = dev;
        fw.reloc = job->relocs;
        fw.num_relocs = job->num_relocs;
        fw.class = job->class;

        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];

                size += g->words * sizeof(u32);
        }

        /*
         * Try a non-blocking allocation from a higher-priority pool first,
         * since waiting for the allocation here is a major performance hit.
         */
        job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
                                               GFP_NOWAIT);

        /* the higher-priority allocation failed, try a generic blocking one */
        if (!job->gather_copy_mapped)
                job->gather_copy_mapped = dma_alloc_wc(host, size,
                                                       &job->gather_copy,
                                                       GFP_KERNEL);
        if (!job->gather_copy_mapped)
                return -ENOMEM;

        job->gather_copy_size = size;

        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];
                void *gather;

                /* Copy the gather */
                gather = host1x_bo_mmap(g->bo);
                memcpy(job->gather_copy_mapped + offset, gather + g->offset,
                       g->words * sizeof(u32));
                host1x_bo_munmap(g->bo, gather);

                /* Store the location in the buffer */
                g->base = job->gather_copy;
                g->offset = offset;

                /* Validate the job */
                if (validate(&fw, g))
                        return -EINVAL;

                offset += g->words * sizeof(u32);
        }

        /* No relocs should remain at this point */
        if (fw.num_relocs)
                return -EINVAL;

        return 0;
}

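/*
 * Prepare a job for submission: pin all referenced buffers, copy and
 * validate the gathers if the firewall is enabled, resolve each gather's
 * base address and patch the relocations into the command streams.
 */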
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
        int err;
        unsigned int i, j;
        struct host1x *host = dev_get_drvdata(dev->parent);

        /* pin memory */
        err = pin_job(host, job);
        if (err)
                goto out;

        if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
                err = copy_gathers(host->dev, job, dev);
                if (err)
                        goto out;
        }

        /* patch gathers */
        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];

                /* process each gather buffer only once */
                if (g->handled)
                        continue;

                /* copy_gathers() sets the gather's base if the firewall is enabled */
                if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                        g->base = job->gather_addr_phys[i];

                for (j = i + 1; j < job->num_gathers; j++) {
                        if (job->gathers[j].bo == g->bo) {
                                job->gathers[j].handled = true;
                                job->gathers[j].base = g->base;
                        }
                }

                err = do_relocs(job, g);
                if (err)
                        break;
        }

out:
        if (err)
                host1x_job_unpin(job);
        wmb();

        return err;
}
EXPORT_SYMBOL(host1x_job_pin);

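/*
 * Undo the work done by host1x_job_pin(): unmap and unpin every buffer
 * recorded in job->unpins, release any IOVA ranges allocated for gathers
 * and free the firewall's gather copy, if present.
 */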
void host1x_job_unpin(struct host1x_job *job)
{
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        unsigned int i;

        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
                struct device *dev = unpin->dev ?: host->dev;
                struct sg_table *sgt = unpin->sgt;

                if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
                    unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
                                iova_pfn(&host->iova, job->addr_phys[i]));
                }

                if (unpin->dev && sgt)
                        dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);

                host1x_bo_unpin(dev, unpin->bo, sgt);
                host1x_bo_put(unpin->bo);
        }

        job->num_unpins = 0;

        if (job->gather_copy_size)
                dma_free_wc(host->dev, job->gather_copy_size,
                            job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
        dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
        dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
        dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
        dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
        dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
        dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}