/* "It's not vertex data... it's an attribute."
 * chai.git — src/ktrace.c */
1 /*
2  *
3  * Copyright (C) 2017 Cafe Beverage. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16 static void formatted_hex_dump(char *array, uint8_t *buffer, size_t s) 
17 {
18         if (!buffer) return;
19
20         print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, 16, 1, buffer, s, true);
21 }
22
23 /* Dump CPU memory by address. */
24
25 #define CPU_DUMP_SIZE 256
26
27 static void *kbase_fetch_cpu(struct kbase_context *kctx, void __user *cpu_addr, size_t *size_o, size_t size_i)
28 {
29         uint8_t *buffer;
30
31         if (!cpu_addr) return NULL;
32
33         if (!size_i) size_i = CPU_DUMP_SIZE;
34         if (size_o) *size_o = size_i;
35
36         buffer = kmalloc(size_i, GFP_KERNEL);
37
38         if (!buffer) {
39                 return NULL;
40         }
41
42         if (copy_from_user(buffer, cpu_addr, size_i) != 0) {
43                 kfree(buffer);
44                 return NULL;
45         }
46
47         return buffer;
48 }
49
50 /* Dump GPU memory by address.
51  * See mali_kbase_debug_mem_view.c for more information */
52
53 int kbase_jd_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg);
54 void kbase_jd_umm_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc);
55
56 static void *kbase_fetch_gpu(struct kbase_context *kctx, u64 gpu_addr, size_t *size_o, size_t size_i)
57 {
58         struct kbase_va_region *reg;
59         struct kbase_mem_phy_alloc *alloc;
60         uint8_t *buffer;
61         uint8_t *buffer_on;
62         int p;
63         pgprot_t prot = PAGE_KERNEL;
64         uint64_t offset;
65
66         reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
67
68         if (!reg) {
69                 printk("Region not found!");
70                 return NULL;
71         }
72
73         if (!reg->gpu_alloc) {
74                 printk("No alloc!\n");
75                 return NULL;
76         }
77
78         offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
79
80         if (offset < 0) {
81                 printk("What?\n");
82                 printk("GPU addr: %LX", gpu_addr);
83                 printk("start_pfn: %LX", reg->start_pfn);
84                 return NULL;
85         }
86
87         alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
88
89         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
90                 kbase_jd_umm_map(kctx, reg);
91         }
92
93         if (!alloc->nents) {
94                 printk("Unbacked buffer %LX\n", gpu_addr);
95                 printk("Type: %d\n", alloc->type);
96
97                 return NULL;
98         }
99
100         if (!size_i) size_i = (alloc->nents << PAGE_SHIFT) - offset;
101         if (size_o) *size_o = size_i;
102
103         if (!(reg->flags & KBASE_REG_CPU_CACHED))
104                 prot = pgprot_writecombine(prot);
105
106         buffer = kmalloc(alloc->nents << PAGE_SHIFT, GFP_KERNEL);
107
108         if (!buffer) {
109                 printk("Bad alloc");
110                 return NULL;
111         }
112
113         for (p = 0; p < alloc->nents; ++p) {
114                 struct page *page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
115                 uint8_t *mapping = vmap(&page, 1, VM_MAP, prot);
116
117                 if (!mapping) {
118                         printk("Bad mapping");
119                         kfree(buffer);
120                         return NULL;
121                 }
122
123                 memcpy(buffer + (p << PAGE_SHIFT), mapping, PAGE_SIZE);
124
125                 vunmap(mapping);
126         }
127
128         if (offset) {
129                 buffer_on = kmalloc(size_i, GFP_KERNEL);
130                 memcpy(buffer_on, buffer + offset, size_i);
131                 kfree(buffer);
132
133                 return buffer_on;
134         }
135
136         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
137                 kbase_jd_umm_unmap(kctx, alloc);
138         }
139
140         return buffer;
141 }
142
143 static void kbase_push_gpu(struct kbase_context *kctx, void *buffer, u64 gpu_addr, size_t s)
144 {
145         struct kbase_va_region *reg;
146         struct kbase_mem_phy_alloc *alloc;
147         int p;
148         pgprot_t prot = PAGE_KERNEL;
149         uint64_t offset;
150         uint32_t buffer_offset = 0;
151         bool first_page = true;
152
153         printk("Pushing %d bytes from %p to %LX", s, buffer, gpu_addr);
154
155         reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
156
157         if (!reg) {
158                 printk("Region not found!");
159                 return;
160         }
161
162         if (!reg->gpu_alloc) {
163                 printk("No alloc!\n");
164                 return;
165         }
166
167         offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
168
169         printk("Offset: %LX", offset);
170
171         if (offset < 0) {
172                 printk("What?\n");
173                 printk("GPU addr: %LX", gpu_addr);
174                 printk("start_pfn: %LX", reg->start_pfn);
175                 return;
176         }
177
178         alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
179
180         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
181                 kbase_jd_umm_map(kctx, reg);
182         }
183
184         if (!alloc->nents) {
185                 printk("Unbacked buffer %LX\n", gpu_addr);
186                 printk("Type: %d\n", alloc->type);
187                 return;
188         }
189
190         if (!(reg->flags & KBASE_REG_CPU_CACHED))
191                 prot = pgprot_writecombine(prot);
192
193         /* Copy page-by-page */
194
195         for (p = offset >> PAGE_SHIFT; p < ((offset + s + PAGE_SIZE - 1) >> PAGE_SHIFT); ++p) {
196                 struct page *page;
197                 uint8_t *mapping;
198
199                 int copy_offset = p << PAGE_SHIFT, copy_quant = PAGE_SIZE;
200
201                 page = pfn_to_page(PFN_DOWN(alloc->pages[p]));
202                 mapping = vmap(&page, 1, VM_MAP, prot);
203
204                 if (!mapping) {
205                         printk("Bad mapping");
206                         return;
207                 }
208
209                 if(first_page) {
210                         copy_offset += offset & ~PAGE_MASK;
211                         copy_quant = PAGE_SIZE - copy_offset;
212
213                         first_page = false;
214                 }
215
216                 if(buffer_offset + copy_quant > s) {
217                         copy_quant = s - buffer_offset;
218                 }
219
220                 printk("Copying %d bytes to offset %d from offset %d",
221                                 copy_quant, copy_offset, buffer_offset);
222
223                 memcpy(mapping + copy_offset,
224                                 buffer + buffer_offset,
225                                 copy_quant);
226
227                 buffer_offset += copy_quant;
228
229                 vunmap(mapping);
230         }
231
232         if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
233                 kbase_jd_umm_unmap(kctx, alloc);
234         }
235 }
236
237 /* Assert that synthesised command stream is bit-identical with trace */
238
239 static void assert_gpu_same(struct kbase_context *kctx, uint64_t addr, size_t s, uint8_t *synth) 
240 {
241         uint8_t *buffer = kbase_fetch_gpu(kctx, addr, NULL, s);
242         int i;
243
244         if (!buffer) {
245                 printk("Bad allocation in assert %LX", addr);
246                 return;
247         }
248
249         for (i = 0; i < s; ++i) {
250                 if (buffer[i] != synth[i]) {
251                         printk("At %LX, expected:", addr);
252                         formatted_hex_dump("b", synth, s);
253                         printk("Instead got:");
254                         formatted_hex_dump("b", buffer, s);
255
256                         break;
257                 }
258         }
259
260         kfree(buffer);
261 }
262
263 static void assert_gpu_zeroes(struct kbase_context *kctx, uint64_t addr, size_t s) 
264 {
265         char *zero = kzalloc(s, GFP_KERNEL);
266
267         assert_gpu_same(kctx, addr, s, zero);
268         kfree(zero);
269 }
270
271 static void quick_dump_gpu(struct kbase_context *kctx, uint64_t addr, size_t s)
272 {
273         uint8_t *buf;
274
275         if(!addr) {
276                 printk("Null quick dump");
277                 return;
278         }
279
280         buf = kbase_fetch_gpu(kctx, addr, NULL, s);
281
282         printk("Quick GPU dump (%LX)", addr);
283
284         if(!buf) {
285                 printk("Not found");
286                 return;
287         }
288
289         formatted_hex_dump("a", buf, s);
290         kfree(buf);
291 }
292
293 #include "chai-notes.h"
294
295 #define DEFINE_CASE(label) case label: return #label;
296
297 static char *kbase_job_type_name(int type)
298 {
299         switch (type) {
300                 DEFINE_CASE(JOB_NOT_STARTED)
301                 DEFINE_CASE(JOB_TYPE_NULL)
302                 DEFINE_CASE(JOB_TYPE_SET_VALUE)
303                 DEFINE_CASE(JOB_TYPE_CACHE_FLUSH)
304                 DEFINE_CASE(JOB_TYPE_COMPUTE)
305                 DEFINE_CASE(JOB_TYPE_VERTEX)
306                 DEFINE_CASE(JOB_TYPE_TILER)
307                 DEFINE_CASE(JOB_TYPE_FUSED)
308                 DEFINE_CASE(JOB_TYPE_FRAGMENT)
309
310                 default:
311                         printk("Requested job type %X", type);
312                         return "UNKNOWN";
313         }
314 }
315
316 static char* kbase_gl_mode_name(uint8_t b) 
317 {
318         switch (b) {
319                 DEFINE_CASE(CHAI_POINTS)
320                 DEFINE_CASE(CHAI_LINES)
321                 DEFINE_CASE(CHAI_TRIANGLES)
322                 DEFINE_CASE(CHAI_TRIANGLE_STRIP)
323                 DEFINE_CASE(CHAI_TRIANGLE_FAN)
324
325                 default:
326                         printk("Unknown mode %X", b);
327                         return "GL_UNKNOWN";
328         }
329 }
330
331 /* TODO: Figure out what "fbd" means */
332 /* TODO: Corresponding SFBD decode (don't assume MFBD) */
333
334 static void kbase_trace_fbd(struct kbase_context *kctx, uint32_t fbd)
335 {
336         struct tentative_mfbd *mfbd = (struct tentative_mfbd *) kbase_fetch_gpu(kctx, (uint64_t) (uint32_t) fbd & FBD_POINTER_MASK, NULL, sizeof(struct tentative_mfbd));
337         uint8_t *buf;
338         uint32_t *buf32;
339
340         printk("MFBD @ %X (%X)", fbd & FBD_POINTER_MASK, fbd & ~FBD_POINTER_MASK);
341         printk("MFBD flags %X, heap free address %LX",
342                         mfbd->flags, mfbd->heap_free_address);
343
344         formatted_hex_dump("Block 1", (uint8_t *) mfbd->block1, sizeof(mfbd->block1));
345
346         printk("unk2");
347         buf = kbase_fetch_gpu(kctx, mfbd->unknown2, NULL, 64);
348         formatted_hex_dump("B", buf, 64);
349         kfree(buf);
350
351         assert_gpu_zeroes(kctx, mfbd->block2[0], 64);
352         assert_gpu_zeroes(kctx, mfbd->block2[1], 64);
353         assert_gpu_zeroes(kctx, mfbd->ugaT, 64);
354         assert_gpu_zeroes(kctx, mfbd->unknown_gpu_address, 64);
355
356         /* Somehow maybe sort of kind of framebufferish?
357          * It changes predictably in the same way as the FB.
358          * Unclear what exactly it is, though.
359          *
360          * Where the framebuffer is: 1A 33 00 00
361          * This is: 71 B3 03 71 6C 4D 87 46
362          * Where the framebuffer is: 1A 33 1A 00
363          * This is: AB E4 43 9C E8 D6 D1 25
364          *
365          * It repeats, too, but everything 8 bytes rather than 4.
366          *
367          * It is a function of the colour painted. But the exact details
368          * are elusive.
369          *
370          * Also, this is an output, not an input.
371          * Assuming the framebuffer works as intended, RE may be
372          * pointless.
373          */
374
375         printk("ugaT %LX, uga %LX", mfbd->ugaT, mfbd->unknown_gpu_address);
376         printk("ugan %LX", mfbd->unknown_gpu_addressN);
377         buf = kbase_fetch_gpu(kctx, mfbd->unknown_gpu_addressN, NULL, 64);
378         formatted_hex_dump("B", buf, 64);
379         kfree(buf);
380
381         printk("unk1 %X, b1 %LX, b2 %LX, unk2 %LX, unk3 %LX, blah %LX",
382                         mfbd->unknown1,
383                         mfbd->block2[0],
384                         mfbd->block2[1],
385                         mfbd->unknown2,
386                         mfbd->unknown3,
387                         mfbd->blah);
388
389         printk("Weights [ %X, %X, %X, %X, %X, %X, %X, %X ]",
390                         mfbd->weights[0], mfbd->weights[1],
391                         mfbd->weights[2], mfbd->weights[3],
392                         mfbd->weights[4], mfbd->weights[5],
393                         mfbd->weights[6], mfbd->weights[7]);
394
395         formatted_hex_dump("Block 3", (uint8_t *) mfbd->block3, sizeof(mfbd->block3));
396         printk("---");
397         formatted_hex_dump("Block 4", (uint8_t *) mfbd->block4, sizeof(mfbd->block4));
398
399         printk("--- (seriously though) --- %X", mfbd->block3[4]);
400         buf32 = kbase_fetch_gpu(kctx, mfbd->block3[4], NULL, 128);
401         
402         if(buf32) {
403                 formatted_hex_dump("a", (uint8_t*) buf32, 128);
404
405                 quick_dump_gpu(kctx, buf32[6], 64);
406                 quick_dump_gpu(kctx, buf32[20], 64);
407                 quick_dump_gpu(kctx, buf32[23], 64);
408                 quick_dump_gpu(kctx, buf32[24], 64);
409                 quick_dump_gpu(kctx, buf32[25], 64);
410                 quick_dump_gpu(kctx, buf32[26], 64);
411                 quick_dump_gpu(kctx, buf32[27], 64);
412                 quick_dump_gpu(kctx, buf32[28], 64);
413                 quick_dump_gpu(kctx, buf32[31], 64);
414
415                 kfree(buf32);
416         }
417
418         quick_dump_gpu(kctx, mfbd->block3[16], 128);
419
420         kfree(mfbd);
421 }
422
423 static void kbase_trace_vecN_as_uint32_hex(uint32_t *p, size_t count)
424 {
425         if(count == 1) 
426                 printk("\t<%X>,", p[0]);
427         else if(count == 2)
428                 printk("\t<%X, %X>,", p[0], p[1]);
429         else if(count == 3)
430                 printk("\t<%X, %X, %X>,", p[0], p[1], p[2]);
431         else if(count == 4)
432                 printk("\t<%X, %X, %X, %X>,", p[0], p[1], p[2], p[3]);
433         else
434                 printk("Cannot print vec%d", count);
435 }
436
437 static void kbase_trace_attribute(struct kbase_context *kctx, uint64_t address)
438 {
439         uint64_t raw;
440         uint64_t flags;
441         int vertex_count;
442         int component_count;
443         uint32_t *v;
444         uint32_t *p;
445         int i;
446
447         struct attribute_buffer *vb =
448                 (struct attribute_buffer *) kbase_fetch_gpu(
449                                 kctx, address,
450                                 NULL, sizeof(struct attribute_buffer));
451         if (!vb) return;
452
453         vertex_count = vb->total_size / vb->element_size;
454         component_count = vb->element_size / sizeof(float);
455
456         raw = vb->elements & ~3;
457         flags = vb->elements ^ raw;
458
459         p = v = kbase_fetch_gpu(kctx, raw, NULL, vb->total_size);
460
461         printk("attribute vec%d mem%LXflag%LX = {", component_count, raw, flags);
462
463         for (i = 0; i < vertex_count; i++, p += component_count)
464                 kbase_trace_vecN_as_uint32_hex(p, component_count);
465
466         printk("}");
467
468         kbase_push_gpu(kctx, v, raw, vb->total_size);
469
470         kfree(vb);
471 }
472
473 static void kbase_trace_hw_chain(struct kbase_context *kctx, uint64_t chain)
474 {
475         struct job_descriptor_header *h;
476         uint8_t *gen_pay;
477         u64 next;
478         u64 payload;
479
480         /* Trace descriptor */
481         h = kbase_fetch_gpu(kctx, chain, NULL, sizeof(*h));
482
483         printk("%s job, %d-bit, status %X, incomplete %X, fault %LX, barrier %d, index %hX, dependencies (%hX, %hX)",
484                         kbase_job_type_name(h->job_type),
485                         h->job_descriptor_size ? 64 : 32,
486                         h->exception_status,
487                         h->first_incomplete_task,
488                         h->fault_pointer,
489                         h->job_barrier,
490                         h->job_index,
491                         h->job_dependency_index_1,
492                         h->job_dependency_index_2);
493
494         payload = chain + sizeof(*h);
495
496         switch (h->job_type) {
497         case JOB_TYPE_SET_VALUE: {
498                 struct payload_set_value *s;
499
500                 s = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*s));
501                 printk("set value -> %LX (%LX)", s->out, s->unknown);
502                 kfree(s);
503                 break;
504         }
505
506         case JOB_TYPE_VERTEX:
507         case JOB_TYPE_TILER: {
508                 struct payload_vertex_tiler32 *v;
509                 int addr = 0;
510                 uint64_t *i_shader;
511                 uint8_t *shader;
512
513                 v = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*v));
514
515                 if ((v->shader & 0xFFF00000) == 0x5AB00000) {
516                         printk("Job sabotaged");
517                         break;
518                 }
519
520                 /* Mask out lower 128-bit (instruction word) for flags.
521                  *
522                  * TODO: Decode flags.
523                  *
524                  * TODO: Link with cwabbotts-open-gpu-tools to
525                  * disassemble on the fly.
526                  */
527
528                 i_shader = kbase_fetch_gpu(kctx, v->shader, NULL, sizeof(u64));
529
530                 printk("%s shader @ %LX (flags %LX)\n",
531                         h->job_type == JOB_TYPE_VERTEX ? "Vertex" : "Fragment",
532                         *i_shader & ~15,
533                         *i_shader & 15);
534
535                 shader = kbase_fetch_gpu(kctx, *i_shader & ~15, NULL, 0x880 - 0x540);
536                 formatted_hex_dump("s", shader, 0x880 - 0x540);
537                 kfree(shader);
538                 kfree(i_shader);
539
540                 kbase_trace_attribute(kctx, v->vertices);
541
542                 if (h->job_type == JOB_TYPE_TILER)
543                         printk("Drawing in %s", kbase_gl_mode_name(((uint8_t *) v->block1)[8]));
544
545                 assert_gpu_zeroes(kctx, v->zeroes, 64);
546
547                 if (v->null1 | v->null2 | v->null4)
548                         printk("Null tripped?");
549
550                 printk("%cFBD", v->fbd & FBD_TYPE ? 'M' : 'S');
551                 kbase_trace_fbd(kctx, v->fbd);
552
553                 formatted_hex_dump("Block 1", (uint8_t *) v->block1, sizeof(v->block1));
554
555                 for (addr = 0; addr < 14; ++addr) {
556                         uint32_t address = ((uint32_t *) &(v->zeroes))[addr];
557                         uint8_t *buf;
558                         size_t sz = 64;
559
560                         /* Structure known. Skip hex dump */
561                         if (addr == 2) continue;
562                         if (addr == 3) continue;
563                         if (addr == 6) continue;
564                         if (addr == 10 && h->job_type == JOB_TYPE_VERTEX) continue;
565                         if (addr == 11) continue;
566                         if (addr == 12) continue;
567
568                         /* Size known exactly but not structure; cull */
569                         if (addr == 0) sz = 0x100;
570                         if (addr == 1) sz = 0x10;
571                         if (addr == 4) sz = 0x40;
572                         if (addr == 5) sz = 0x20;
573                         if (addr == 7) sz = 0x10;
574                         if (addr == 8) sz = 0x20;
575
576                         printk("Addr %d %X", addr, address);
577
578                         if (!address) continue;
579
580                         if (addr == 0) {
581                                 buf = kbase_fetch_cpu(kctx, (void *) (uint32_t) address, NULL, sz);
582                         } else {
583                                 buf = kbase_fetch_gpu(kctx, address, NULL, sz);
584                         }
585
586                         formatted_hex_dump("B", buf, sz);
587
588                         if (addr == 8) {
589                                 uint32_t sub = *((uint32_t *) buf) & 0xFFFFFFFE;
590                                 uint8_t *sbuf = kbase_fetch_gpu(kctx, sub, NULL, 64);
591
592                                 printk("---");
593                                 formatted_hex_dump("S", sbuf, 64);
594                                 kfree(sbuf);
595                         }
596
597                         if (addr == 1) {
598                                 uint64_t sub = *((uint64_t*) buf) >> 8;
599                                 uint8_t *sbuf = kbase_fetch_gpu(kctx, sub, NULL, 64);
600
601                                 printk("--- %LX", sub);
602                                 formatted_hex_dump("S", sbuf, 64);
603                                 kfree(sbuf);
604                         }
605
606                         kfree(buf);
607                 }
608
609                 formatted_hex_dump("Block 2", (uint8_t *) v->block2, sizeof(v->block2));
610
611                 kfree(v);
612                 break;
613         }
614
615         case JOB_TYPE_FRAGMENT: {
616                 struct payload_fragment *f;
617
618                 f = kbase_fetch_gpu(kctx, payload, NULL, sizeof(*f));
619
620                 /* Bit 31 of max_tile_coord clear on the first frame.
621                  * Set after.
622                  * TODO: Research.
623                  */
624
625                 printk("frag %X %X (%d, %d) -> (%d, %d), fbd type %cFBD at %LX (%LX) \n",
626                                 f->min_tile_coord, f->max_tile_coord,
627                                 TILE_COORD_X(f->min_tile_coord),
628                                 TILE_COORD_Y(f->min_tile_coord),
629                                 TILE_COORD_X(f->max_tile_coord),
630                                 TILE_COORD_Y(f->max_tile_coord),
631                                 f->fragment_fbd & FBD_TYPE ? 'M' : 'S',
632                                 f->fragment_fbd,
633                                 f->fragment_fbd & FBD_POINTER_MASK);
634
635                 kbase_trace_fbd(kctx, f->fragment_fbd);
636
637                 kfree(f);
638                 break;
639         }
640
641         default: {
642                 printk("Dumping payload %LX for job type %s",
643                                 payload,
644                                 kbase_job_type_name(h->job_type));
645
646                 gen_pay = kbase_fetch_gpu(kctx, payload, NULL, 256);
647                 formatted_hex_dump("pl", gen_pay, 256);
648                 kfree(gen_pay);
649         }
650         }
651
652         next = h->job_descriptor_size
653                 ? h->next_job._64
654                 : h->next_job._32;
655
656         kfree(h);
657
658         /* Traverse the job chain */
659         if (next)
660                 kbase_trace_hw_chain(kctx, next);
661 }
662
663 static void kbase_trace_ext_resources(
664                 struct kbase_context *kctx,
665                 int n,
666                 union kbase_pointer rsrc)
667 {
668         int i;
669         u64 *lst = kbase_fetch_cpu(kctx, rsrc.value, NULL, sizeof(u64) * n);
670
671         for (i = 0; i < n; ++i) {
672                 u64 link = lst[i];
673                 u64 addr = link & ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
674                 void *rs = kbase_fetch_gpu(kctx, addr, NULL, 0);
675
676                 printk("Resource %d: %LX %s",
677                         i,
678                         addr,
679                         link & BASE_EXT_RES_ACCESS_EXCLUSIVE
680                                 ? "(exclusive)" : "(shared)");
681
682                 /* Framebuffer in fragment jobs */
683
684                 formatted_hex_dump("fb", rs, 48);
685                 kfree(rs);
686         }
687
688         kfree(lst);
689 }
690
691 extern const char *kbasep_map_core_reqs_to_string(base_jd_core_req);
692
693 static void kbase_trace_special_base_jd_atom_v2(
694                 struct kbase_context *kctx,
695                 struct base_jd_atom_v2 *v)
696 {
697         printk("%s", kbasep_map_core_reqs_to_string(v->core_req));
698
699         kbase_trace_ext_resources(kctx, v->nr_extres, v->extres_list);
700
701         if (v->core_req & BASE_JD_REQ_SOFT_JOB) {
702                 if (v->core_req & BASE_JD_REQ_SOFT_REPLAY) {
703                         struct base_jd_replay_payload *payload;
704
705                         payload = (struct base_jd_replay_payload *)
706                                 kbase_fetch_gpu(kctx, v->jc, NULL, sizeof(*payload));
707
708                         printk("tiler_jc_list = %LX, fragment_jc = %LX, "
709                                 "tiler_heap_free = %LX, fragment hierarchy mask = %hX, "
710                                 "tiler hierachy mask = %hX, hierarchy def weight %X, "
711                                 "tiler core_req = %X, fragment core_req = %X",
712                                 payload->tiler_jc_list,
713                                 payload->fragment_jc,
714                                 payload->tiler_heap_free,
715                                 payload->fragment_hierarchy_mask,
716                                 payload->tiler_hierarchy_mask,
717                                 payload->hierarchy_default_weight,
718                                 payload->tiler_core_req,
719                                 payload->fragment_core_req);
720
721                         kfree(payload);
722                 } else  {
723                         /* TODO: Soft job decoding */
724                         printk("Unknown soft job");
725                 }
726         } else {
727                 kbase_trace_hw_chain(kctx, v->jc);
728         }
729 }
730
731 /* The ioctl tracer is automatically generated by black */
732 #include "black-output-trace.c"