/* chai ktrace — Mali GPU job-chain tracer (src/ktrace.c) */
/*
 *
 * Copyright (C) 2017 Cafe Beverage. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */
15
16 static void formatted_hex_dump(char *array, uint8_t *buffer, size_t s) 
17 {
18         if (!buffer) return;
19
20         print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, 16, 1, buffer, s, true);
21 }
22
23 /* Dump CPU memory by address. */
24
25 #define CPU_DUMP_SIZE 256
26
27 static void *kbase_fetch_cpu(struct kbase_context *kctx, void __user *cpu_addr, size_t *size_o, size_t size_i)
28 {
29         uint8_t *buffer;
30
31         if (!cpu_addr) return NULL;
32
33         if (!size_i) size_i = CPU_DUMP_SIZE;
34         if (size_o) *size_o = size_i;
35
36         buffer = kmalloc(size_i, GFP_KERNEL);
37
38         if (!buffer) {
39                 return NULL;
40         }
41
42         if (copy_from_user(buffer, cpu_addr, size_i) != 0) {
43                 kfree(buffer);
44                 return NULL;
45         }
46
47         return buffer;
48 }
49
50 /* Dump GPU memory by address.
51  * See mali_kbase_debug_mem_view.c for more information */
52
53 int kbase_jd_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg);
54 void kbase_jd_umm_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc);
55
56 static void *kbase_fetch_gpu(struct kbase_context *kctx, u64 gpu_addr, size_t *size_o, size_t size_i)
57 {
58         struct kbase_va_region *reg;
59         struct kbase_mem_phy_alloc *alloc;
60         uint8_t *buffer;
61         uint8_t *buffer_on;
62         int p;
63         pgprot_t prot = PAGE_KERNEL;
64         uint64_t offset;
65
66         reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
67
68         if (!reg) {
69                 printk("Region not found!");
70                 return NULL;
71         }
72
73         if (!reg->gpu_alloc) {
74                 printk("No alloc!\n");
75                 return NULL;
76         }
77
78         offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
79
80         if (offset < 0) {
81                 printk("What?\n");
82                 printk("GPU addr: %LX", gpu_addr);
83                 printk("start_pfn: %LX", reg->start_pfn);
84                 return NULL;
85         }
86
87         alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
88
89         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
90                 kbase_jd_umm_map(kctx, reg);
91         }
92
93         if (!alloc->nents) {
94                 printk("Unbacked buffer %LX\n", gpu_addr);
95                 printk("Type: %d\n", alloc->type);
96
97                 return NULL;
98         }
99
100         if (!size_i) size_i = (alloc->nents << PAGE_SHIFT) - offset;
101         if (size_o) *size_o = size_i;
102
103         if (!(reg->flags & KBASE_REG_CPU_CACHED))
104                 prot = pgprot_writecombine(prot);
105
106         buffer = kmalloc(alloc->nents << PAGE_SHIFT, GFP_KERNEL);
107
108         if (!buffer) {
109                 printk("Bad alloc");
110                 return NULL;
111         }
112
113         for (p = 0; p < alloc->nents; ++p) {
114                 struct page *page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
115                 uint8_t *mapping = vmap(&page, 1, VM_MAP, prot);
116
117                 if (!mapping) {
118                         printk("Bad mapping");
119                         kfree(buffer);
120                         return NULL;
121                 }
122
123                 memcpy(buffer + (p << PAGE_SHIFT), mapping, PAGE_SIZE);
124
125                 vunmap(mapping);
126         }
127
128         if (offset) {
129                 buffer_on = kmalloc(size_i, GFP_KERNEL);
130                 memcpy(buffer_on, buffer + offset, size_i);
131                 kfree(buffer);
132
133                 return buffer_on;
134         }
135
136         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
137                 kbase_jd_umm_unmap(kctx, alloc);
138         }
139
140         return buffer;
141 }
142
143 static void kbase_push_gpu(struct kbase_context *kctx, void *buffer, u64 gpu_addr, size_t s)
144 {
145         struct kbase_va_region *reg;
146         struct kbase_mem_phy_alloc *alloc;
147         int p;
148         pgprot_t prot = PAGE_KERNEL;
149         uint64_t offset;
150         uint32_t buffer_offset = 0;
151         bool first_page = true;
152
153         reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
154
155         if (!reg) {
156                 printk("Region not found!");
157                 return;
158         }
159
160         if (!reg->gpu_alloc) {
161                 printk("No alloc!\n");
162                 return;
163         }
164
165         offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
166
167         if (offset < 0) {
168                 printk("What?\n");
169                 printk("GPU addr: %LX", gpu_addr);
170                 printk("start_pfn: %LX", reg->start_pfn);
171                 return;
172         }
173
174         alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
175
176         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
177                 kbase_jd_umm_map(kctx, reg);
178         }
179
180         if (!alloc->nents) {
181                 printk("Unbacked buffer %LX\n", gpu_addr);
182                 printk("Type: %d\n", alloc->type);
183                 return;
184         }
185
186         if (!(reg->flags & KBASE_REG_CPU_CACHED))
187                 prot = pgprot_writecombine(prot);
188
189         /* Copy page-by-page */
190
191         for (p = offset >> PAGE_SHIFT; p < ((offset + s + PAGE_SIZE - 1) >> PAGE_SHIFT); ++p) {
192                 struct page *page;
193                 uint8_t *mapping;
194
195                 int copy_offset = p << PAGE_SHIFT, copy_quant = PAGE_SIZE;
196
197                 page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
198                 mapping = vmap(&page, 1, VM_MAP, prot);
199
200                 if (!mapping) {
201                         printk("Bad mapping");
202                         return;
203                 }
204
205                 if(first_page) {
206                         copy_offset += offset & ~PAGE_MASK;
207                         copy_quant = PAGE_SIZE - copy_offset;
208
209                         first_page = false;
210                 }
211
212                 if(buffer_offset + copy_quant > s) {
213                         copy_quant = s - buffer_offset;
214                 }
215
216                 memcpy(mapping + copy_offset,
217                                 buffer + buffer_offset,
218                                 copy_quant);
219
220                 buffer_offset += copy_quant;
221
222                 vunmap(mapping);
223         }
224
225         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
226                 kbase_jd_umm_unmap(kctx, alloc);
227         }
228 }
229
230 /* Assert that synthesised command stream is bit-identical with trace */
231
/* Compare @s bytes of GPU memory at @addr against the synthesised
 * reference @synth; on the first mismatching byte, log both images and
 * stop comparing. */
static void assert_gpu_same(struct kbase_context *kctx, uint64_t addr, size_t s, uint8_t *synth) 
{
        uint8_t *buffer = kbase_fetch_gpu(kctx, addr, NULL, s);
        int i;

        if (!buffer) {
                printk("Bad allocation in assert %LX", addr);
                return;
        }

        for (i = 0; i < s; ++i) {
                if (buffer[i] == synth[i])
                        continue;

                printk("At %LX, expected:", addr);
                formatted_hex_dump("b", synth, s);
                printk("Instead got:");
                formatted_hex_dump("b", buffer, s);
                break;
        }

        kfree(buffer);
}
255
256 static void assert_gpu_zeroes(struct kbase_context *kctx, uint64_t addr, size_t s) 
257 {
258         char *zero = kzalloc(s, GFP_KERNEL);
259
260         assert_gpu_same(kctx, addr, s, zero);
261         kfree(zero);
262 }
263
/* Fetch and hex-dump @s bytes of GPU memory at @addr.  Tolerates a null
 * address and an unmapped region, logging a note in either case. */
static void quick_dump_gpu(struct kbase_context *kctx, uint64_t addr, size_t s)
{
        uint8_t *dump;

        if (!addr) {
                printk("Null quick dump");
                return;
        }

        dump = kbase_fetch_gpu(kctx, addr, NULL, s);

        printk("Quick GPU dump (%LX)", addr);

        if (!dump) {
                printk("Not found");
                return;
        }

        formatted_hex_dump("a", dump, s);
        kfree(dump);
}
285
286 #include "chai-notes.h"
287
288 #define DEFINE_CASE(label) case label: return #label;
289
290 static char *kbase_job_type_name(int type)
291 {
292         switch (type) {
293                 DEFINE_CASE(JOB_NOT_STARTED)
294                 DEFINE_CASE(JOB_TYPE_NULL)
295                 DEFINE_CASE(JOB_TYPE_SET_VALUE)
296                 DEFINE_CASE(JOB_TYPE_CACHE_FLUSH)
297                 DEFINE_CASE(JOB_TYPE_COMPUTE)
298                 DEFINE_CASE(JOB_TYPE_VERTEX)
299                 DEFINE_CASE(JOB_TYPE_TILER)
300                 DEFINE_CASE(JOB_TYPE_FUSED)
301                 DEFINE_CASE(JOB_TYPE_FRAGMENT)
302
303                 default:
304                         printk("Requested job type %X", type);
305                         return "UNKNOWN";
306         }
307 }
308
309 static char* kbase_gl_mode_name(uint8_t b) 
310 {
311         switch (b) {
312                 DEFINE_CASE(CHAI_POINTS)
313                 DEFINE_CASE(CHAI_LINES)
314                 DEFINE_CASE(CHAI_TRIANGLES)
315                 DEFINE_CASE(CHAI_TRIANGLE_STRIP)
316                 DEFINE_CASE(CHAI_TRIANGLE_FAN)
317
318                 default:
319                         printk("Unknown mode %X", b);
320                         return "GL_UNKNOWN";
321         }
322 }
323
324 /* TODO: Figure out what "fbd" means */
325 /* TODO: Corresponding SFBD decode (don't assume MFBD) */
326
327 static void kbase_trace_fbd(struct kbase_context *kctx, uint32_t fbd)
328 {
329         struct tentative_mfbd *mfbd = (struct tentative_mfbd *) kbase_fetch_gpu(kctx, (uint64_t) (uint32_t) fbd & FBD_POINTER_MASK, NULL, sizeof(struct tentative_mfbd));
330         uint8_t *buf;
331         uint32_t *buf32;
332
333         printk("MFBD @ %X (%X)", fbd & FBD_POINTER_MASK, fbd & ~FBD_POINTER_MASK);
334         printk("MFBD flags %X, heap free address %LX",
335                         mfbd->flags, mfbd->heap_free_address);
336
337         formatted_hex_dump("Block 1", (uint8_t *) mfbd->block1, sizeof(mfbd->block1));
338
339         printk("unk2");
340         buf = kbase_fetch_gpu(kctx, mfbd->unknown2, NULL, 64);
341         formatted_hex_dump("B", buf, 64);
342         kfree(buf);
343
344         assert_gpu_zeroes(kctx, mfbd->block2[0], 64);
345         assert_gpu_zeroes(kctx, mfbd->block2[1], 64);
346         assert_gpu_zeroes(kctx, mfbd->ugaT, 64);
347         assert_gpu_zeroes(kctx, mfbd->unknown_gpu_address, 64);
348
349         /* Somehow maybe sort of kind of framebufferish?
350          * It changes predictably in the same way as the FB.
351          * Unclear what exactly it is, though.
352          *
353          * Where the framebuffer is: 1A 33 00 00
354          * This is: 71 B3 03 71 6C 4D 87 46
355          * Where the framebuffer is: 1A 33 1A 00
356          * This is: AB E4 43 9C E8 D6 D1 25
357          *
358          * It repeats, too, but everything 8 bytes rather than 4.
359          *
360          * It is a function of the colour painted. But the exact details
361          * are elusive.
362          *
363          * Also, this is an output, not an input.
364          * Assuming the framebuffer works as intended, RE may be
365          * pointless.
366          */
367
368         printk("ugaT %LX, uga %LX", mfbd->ugaT, mfbd->unknown_gpu_address);
369         printk("ugan %LX", mfbd->unknown_gpu_addressN);
370         buf = kbase_fetch_gpu(kctx, mfbd->unknown_gpu_addressN, NULL, 64);
371         formatted_hex_dump("B", buf, 64);
372         kfree(buf);
373
374         printk("unk1 %X, b1 %LX, b2 %LX, unk2 %LX, unk3 %LX, blah %LX",
375                         mfbd->unknown1,
376                         mfbd->block2[0],
377                         mfbd->block2[1],
378                         mfbd->unknown2,
379                         mfbd->unknown3,
380                         mfbd->blah);
381
382         printk("Weights [ %X, %X, %X, %X, %X, %X, %X, %X ]",
383                         mfbd->weights[0], mfbd->weights[1],
384                         mfbd->weights[2], mfbd->weights[3],
385                         mfbd->weights[4], mfbd->weights[5],
386                         mfbd->weights[6], mfbd->weights[7]);
387
388         formatted_hex_dump("Block 3", (uint8_t *) mfbd->block3, sizeof(mfbd->block3));
389         printk("---");
390         formatted_hex_dump("Block 4", (uint8_t *) mfbd->block4, sizeof(mfbd->block4));
391
392         printk("--- (seriously though) --- %X", mfbd->block3[4]);
393         buf32 = kbase_fetch_gpu(kctx, mfbd->block3[4], NULL, 128);
394         
395         if(buf32) {
396                 formatted_hex_dump("a", (uint8_t*) buf32, 128);
397
398                 quick_dump_gpu(kctx, buf32[6], 64);
399                 quick_dump_gpu(kctx, buf32[20], 64);
400                 quick_dump_gpu(kctx, buf32[23], 64);
401                 quick_dump_gpu(kctx, buf32[24], 64);
402                 quick_dump_gpu(kctx, buf32[25], 64);
403                 quick_dump_gpu(kctx, buf32[26], 64);
404                 quick_dump_gpu(kctx, buf32[27], 64);
405                 quick_dump_gpu(kctx, buf32[28], 64);
406                 quick_dump_gpu(kctx, buf32[31], 64);
407
408                 kfree(buf32);
409         }
410
411         quick_dump_gpu(kctx, mfbd->block3[16], 128);
412
413         kfree(mfbd);
414 }
415
/* Log a vec1..vec4 of uint32 components in GLSL-ish hex syntax; larger
 * counts are reported as unprintable. */
static void kbase_trace_vecN_as_uint32_hex(uint32_t *p, size_t count)
{
        switch (count) {
        case 1:
                printk("\t<%X>,", p[0]);
                break;
        case 2:
                printk("\t<%X, %X>,", p[0], p[1]);
                break;
        case 3:
                printk("\t<%X, %X, %X>,", p[0], p[1], p[2]);
                break;
        case 4:
                printk("\t<%X, %X, %X, %X>,", p[0], p[1], p[2], p[3]);
                break;
        default:
                /* %zu: count is a size_t — the original's %d was a
                 * format-specifier mismatch. */
                printk("Cannot print vec%zu", count);
                break;
        }
}
429
430 static void kbase_trace_attribute(struct kbase_context *kctx, uint64_t address)
431 {
432         uint64_t raw;
433         uint64_t flags;
434         int vertex_count;
435         int component_count;
436         uint32_t *v;
437         uint32_t *p;
438         int i;
439
440         struct attribute_buffer *vb =
441                 (struct attribute_buffer *) kbase_fetch_gpu(
442                                 kctx, address,
443                                 NULL, sizeof(struct attribute_buffer));
444         if (!vb) return;
445
446         vertex_count = vb->total_size / vb->element_size;
447         component_count = vb->element_size / sizeof(float);
448
449         raw = vb->elements & ~3;
450         flags = vb->elements ^ raw;
451
452         p = v = kbase_fetch_gpu(kctx, raw, NULL, vb->total_size);
453
454         printk("attribute vec%d mem%LXflag%LX = {", component_count, raw, flags);
455
456         for (i = 0; i < vertex_count; i++, p += component_count)
457                 kbase_trace_vecN_as_uint32_hex(p, component_count);
458
459         printk("}");
460
461         kbase_push_gpu(kctx, v, raw, vb->total_size);
462
463         kfree(vb);
464 }
465
/*
 * Decode and log one hardware job at GPU address `chain` — descriptor
 * header plus a type-specific payload — then tail-recurse into the next
 * job in the chain.
 *
 * NOTE(review): kbase_fetch_gpu() can return NULL, but `h`, `s`, `v`,
 * `i_shader`, `shader`, `f`, `attr_meta` and `gen_pay` are all
 * dereferenced without a check.
 * NOTE(review): `attr_meta` is fetched on every loop iteration and never
 * kfree'd; `v` is leaked on the "Job sabotaged" early break.
 * NOTE(review): recursion depth is bounded only by the chain length — a
 * long or cyclic chain would exhaust the kernel stack.
 */
static void kbase_trace_hw_chain(struct kbase_context *kctx, uint64_t chain)
{
        struct job_descriptor_header *h;
        uint8_t *gen_pay;
        u64 next;
        u64 payload;

        /* Trace descriptor */
        h = kbase_fetch_gpu(kctx, chain, NULL, sizeof(*h));

        printk("%s job, %d-bit, status %X, incomplete %X, fault %LX, barrier %d, index %hX, dependencies (%hX, %hX)",
                        kbase_job_type_name(h->job_type),
                        h->job_descriptor_size ? 64 : 32,
                        h->exception_status,
                        h->first_incomplete_task,
                        h->fault_pointer,
                        h->job_barrier,
                        h->job_index,
                        h->job_dependency_index_1,
                        h->job_dependency_index_2);

        /* The payload immediately follows the header in GPU memory. */
        payload = chain + sizeof(*h);

        switch (h->job_type) {
        case JOB_TYPE_SET_VALUE: {
                struct payload_set_value *s;

                s = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*s));
                printk("set value -> %LX (%LX)", s->out, s->unknown);
                kfree(s);
                break;
        }

        case JOB_TYPE_VERTEX:
        case JOB_TYPE_TILER: {
                struct payload_vertex_tiler32 *v;
                int addr = 0;
                uint64_t *i_shader;
                uint8_t *shader;

                v = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*v));

                /* 0x5ABxxxxx is a sentinel shader address planted by the
                 * tracer itself; skip decoding such jobs.
                 * NOTE(review): this break leaks `v`. */
                if ((v->shader & 0xFFF00000) == 0x5AB00000) {
                        printk("Job sabotaged");
                        break;
                }

                /* Mask out lower 128-bit (instruction word) for flags.
                 *
                 * TODO: Decode flags.
                 *
                 * TODO: Link with cwabbotts-open-gpu-tools to
                 * disassemble on the fly.
                 */

                i_shader = kbase_fetch_gpu(kctx, v->shader, NULL, sizeof(u64));

                printk("%s shader @ %LX (flags %LX)\n",
                        h->job_type == JOB_TYPE_VERTEX ? "Vertex" : "Fragment",
                        *i_shader & ~15,
                        *i_shader & 15);

                /* 0x880 - 0x540: empirically-chosen window covering the
                 * shader body in the traced blobs. */
                shader = kbase_fetch_gpu(kctx, *i_shader & ~15, NULL, 0x880 - 0x540);
                formatted_hex_dump("s", shader, 0x880 - 0x540);
                kfree(shader);
                kfree(i_shader);

                /* Trace attribute based on metadata */
                uint64_t s = v->attribute_meta;

                /* Walk the attribute-metadata records until the
                 * has-attribute bit is clear.
                 * NOTE(review): each attr_meta fetch is leaked. */
                for(;;) {
                        attribute_meta_t *attr_meta = kbase_fetch_gpu(kctx, s, NULL, sizeof(attribute_meta_t));

                        if(!HAS_ATTRIBUTE(*attr_meta)) break;

                        printk("Attribute %LX (flags %LX)",
                                        ATTRIBUTE_NO(*attr_meta),
                                        ATTRIBUTE_FLAGS(*attr_meta));

                        kbase_trace_attribute(kctx, v->attributes + ATTRIBUTE_NO(*attr_meta) * sizeof(struct attribute_buffer));

                        s += sizeof(attribute_meta_t);
                }

                /* Byte 8 of block1 holds the GL draw mode for tiler jobs. */
                if (h->job_type == JOB_TYPE_TILER)
                        printk("Drawing in %s", kbase_gl_mode_name(((uint8_t *) v->block1)[8]));

                assert_gpu_zeroes(kctx, v->zeroes, 64);

                if (v->null1 | v->null2 | v->null4)
                        printk("Null tripped?");

                printk("%cFBD", v->fbd & FBD_TYPE ? 'M' : 'S');
                kbase_trace_fbd(kctx, v->fbd);

                formatted_hex_dump("Block 1", (uint8_t *) v->block1, sizeof(v->block1));

                /* Dump the 14 pointer slots starting at v->zeroes, skipping
                 * slots whose structure is already decoded elsewhere. */
                for (addr = 0; addr < 14; ++addr) {
                        uint32_t address = ((uint32_t *) &(v->zeroes))[addr];
                        uint8_t *buf;
                        size_t sz = 64;

                        /* Structure known. Skip hex dump */
                        if (addr == 2) continue;
                        if (addr == 3) continue;
                        if (addr == 6) continue;
                        if (addr == 10 && h->job_type == JOB_TYPE_VERTEX) continue;
                        if (addr == 11) continue;
                        if (addr == 12) continue;

                        /* Size known exactly but not structure; cull */
                        if (addr == 0) sz = 0x100;
                        if (addr == 1) sz = 0x10;
                        if (addr == 4) sz = 0x40;
                        if (addr == 5) sz = 0x20;
                        if (addr == 7) sz = 0x20;
                        if (addr == 8) sz = 0x20;

                        printk("Addr %d %X", addr, address);

                        if (!address) continue;

                        /* Slot 0 is a CPU-side pointer; the rest are GPU
                         * addresses. */
                        if (addr == 0) {
                                buf = kbase_fetch_cpu(kctx, (void *) (uint32_t) address, NULL, sz);
                        } else {
                                buf = kbase_fetch_gpu(kctx, address, NULL, sz);
                        }

                        formatted_hex_dump("B", buf, sz);

                        /* Slot 8's first word is itself a pointer (low bit
                         * masked off); follow it one level. */
                        if (addr == 8) {
                                uint32_t sub = *((uint32_t *) buf) & 0xFFFFFFFE;
                                uint8_t *sbuf = kbase_fetch_gpu(kctx, sub, NULL, 64);

                                printk("---");
                                formatted_hex_dump("S", sbuf, 64);
                                kfree(sbuf);
                        }

                        /* Slot 1's first qword holds a pointer in its upper
                         * 56 bits; follow it one level. */
                        if (addr == 1) {
                                uint64_t sub = *((uint64_t*) buf) >> 8;
                                uint8_t *sbuf = kbase_fetch_gpu(kctx, sub, NULL, 64);

                                printk("--- %LX", sub);
                                formatted_hex_dump("S", sbuf, 64);
                                kfree(sbuf);
                        }

                        kfree(buf);
                }

                formatted_hex_dump("Block 2", (uint8_t *) v->block2, sizeof(v->block2));

                kfree(v);
                break;
        }

        case JOB_TYPE_FRAGMENT: {
                struct payload_fragment *f;

                f = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*f));

                /* Bit 31 of max_tile_coord clear on the first frame.
                 * Set after.
                 * TODO: Research.
                 */

                printk("frag %X %X (%d, %d) -> (%d, %d), fbd type %cFBD at %LX (%LX) \n",
                                f->min_tile_coord, f->max_tile_coord,
                                TILE_COORD_X(f->min_tile_coord),
                                TILE_COORD_Y(f->min_tile_coord),
                                TILE_COORD_X(f->max_tile_coord),
                                TILE_COORD_Y(f->max_tile_coord),
                                f->fragment_fbd & FBD_TYPE ? 'M' : 'S',
                                f->fragment_fbd,
                                f->fragment_fbd & FBD_POINTER_MASK);

                kbase_trace_fbd(kctx, f->fragment_fbd);

                kfree(f);
                break;
        }

        default: {
                /* Unknown job type: dump a fixed-size window of the raw
                 * payload instead of decoding it. */
                printk("Dumping payload %LX for job type %s",
                                payload,
                                kbase_job_type_name(h->job_type));

                gen_pay = kbase_fetch_gpu(kctx, payload, NULL, 256);
                formatted_hex_dump("pl", gen_pay, 256);
                kfree(gen_pay);
        }
        }

        /* The next-job pointer is 64- or 32-bit depending on the
         * descriptor size flag. */
        next = h->job_descriptor_size
                ? h->next_job._64
                : h->next_job._32;

        kfree(h);

        /* Traverse the job chain */
        if (next)
                kbase_trace_hw_chain(kctx, next);
}
670
671 static void kbase_trace_ext_resources(
672                 struct kbase_context *kctx,
673                 int n,
674                 union kbase_pointer rsrc)
675 {
676         int i;
677         u64 *lst = kbase_fetch_cpu(kctx, rsrc.value, NULL, sizeof(u64) * n);
678
679         for (i = 0; i < n; ++i) {
680                 u64 link = lst[i];
681                 u64 addr = link & ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
682                 void *rs = kbase_fetch_gpu(kctx, addr, NULL, 0);
683
684                 printk("Resource %d: %LX %s",
685                         i,
686                         addr,
687                         link & BASE_EXT_RES_ACCESS_EXCLUSIVE
688                                 ? "(exclusive)" : "(shared)");
689
690                 /* Framebuffer in fragment jobs */
691
692                 formatted_hex_dump("fb", rs, 48);
693                 kfree(rs);
694         }
695
696         kfree(lst);
697 }
698
699 extern const char *kbasep_map_core_reqs_to_string(base_jd_core_req);
700
701 static void kbase_trace_special_base_jd_atom_v2(
702                 struct kbase_context *kctx,
703                 struct base_jd_atom_v2 *v)
704 {
705         printk("%s", kbasep_map_core_reqs_to_string(v->core_req));
706
707         kbase_trace_ext_resources(kctx, v->nr_extres, v->extres_list);
708
709         if (v->core_req & BASE_JD_REQ_SOFT_JOB) {
710                 if (v->core_req & BASE_JD_REQ_SOFT_REPLAY) {
711                         struct base_jd_replay_payload *payload;
712
713                         payload = (struct base_jd_replay_payload *)
714                                 kbase_fetch_gpu(kctx, v->jc, NULL, sizeof(*payload));
715
716                         printk("tiler_jc_list = %LX, fragment_jc = %LX, "
717                                 "tiler_heap_free = %LX, fragment hierarchy mask = %hX, "
718                                 "tiler hierachy mask = %hX, hierarchy def weight %X, "
719                                 "tiler core_req = %X, fragment core_req = %X",
720                                 payload->tiler_jc_list,
721                                 payload->fragment_jc,
722                                 payload->tiler_heap_free,
723                                 payload->fragment_hierarchy_mask,
724                                 payload->tiler_hierarchy_mask,
725                                 payload->hierarchy_default_weight,
726                                 payload->tiler_core_req,
727                                 payload->fragment_core_req);
728
729                         kfree(payload);
730                 } else  {
731                         /* TODO: Soft job decoding */
732                         printk("Unknown soft job");
733                 }
734         } else {
735                 kbase_trace_hw_chain(kctx, v->jc);
736         }
737 }
738
739 /* The ioctl tracer is automatically generated by black */
740 #include "black-output-trace.c"