// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
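/*
 * Note: this ID table is referenced by .id_table in etnaviv_gpu_driver at
 * the bottom of this file, so the GPU can also be matched by platform
 * device name on systems that instantiate it without devicetree.
 */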
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;

	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
		if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
			*value = ETNAVIV_SOFTPIN_START_ADDRESS;
		else
			*value = ~0ULL;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
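/*
 * For example, etnaviv_field(specs[0], VIVS_HI_CHIP_SPECS_STREAM_COUNT)
 * expands to a mask with VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK followed
 * by a shift with VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; the
 * __MASK/__SHIFT pairs come from the generated register headers included
 * above.
 */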
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);

		/*
		 * Reading these two registers on GC600 rev 0x19 results in an
		 * unhandled fault: external abort on non-linefetch
		 */
		if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
			gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
			gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
		}

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}

		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
			gpu->identity.eco_id = 1;

		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
			gpu->identity.eco_id = 1;
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;

	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600)
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;

	etnaviv_hw_specs(gpu);
}
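/*
 * Clock control: gpu->freq_scale selects a scaling step between 0 (full
 * speed) and 6 (slowest); see the thermal cooling ops below, which export
 * 6 as the maximum cooling state. On cores without dynamic frequency
 * scaling the step is programmed into the FSCALE_VAL field as
 * 1 << (6 - freq_scale), i.e. 64 at full speed down to 1 at the slowest
 * step.
 */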
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}
}
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);

		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
				  VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	gpu->fe_running = false;
	gpu->exec_state = -1;
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = NULL;

	return 0;
}
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 ppc, pmc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}

	gpu->fe_running = true;
}

static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
					  struct etnaviv_iommu_context *context)
{
	u16 prefetch;
	u32 address;

	/* setup the MMU */
	etnaviv_iommu_restore(gpu, context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping);

	etnaviv_gpu_start_fe(gpu, address, prefetch);
}
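/*
 * Note: etnaviv_buffer_init() parks the kernel ring buffer in a WAIT/LINK
 * loop, so once the FE is kicked here it keeps spinning in that loop until
 * new work is linked in by etnaviv_buffer_queue(); hence "idleloop".
 */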
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		goto pm_put;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * On cores with security features supported, we claim control over the
	 * security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	ret = etnaviv_iommu_global_init(gpu);
	if (ret)
		goto fail;

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			priv->mmu_global->memory_base = PHYS_OFFSET;
		else
			priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		priv->mmu_global->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(gpu->dev))
		priv->shm_gfp_mask |= GFP_DMA32;

	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	gpu->initialized = true;

	return 0;

fail:
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	unsigned int i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}
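/*
 * verify_dma() samples the FE DMA address and state twice, polling up to
 * 500 times for a change; if neither register moves, the DMA engine is
 * most likely stuck, which is what the "DMA seems to be stuck" output in
 * etnaviv_gpu_debugfs() below reports.
 */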
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		goto pm_put;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);
	seq_puts(m, "\tidentity\n");
	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count:  %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);
	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
		seq_puts(m, "\t BL is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
		seq_puts(m, "\t ASYNCFE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
		seq_puts(m, "\t MC is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
		seq_puts(m, "\t PPA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
		seq_puts(m, "\t WD is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
		seq_puts(m, "\t NN is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
		seq_puts(m, "\t TP is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");
	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
	unsigned int i = 0;

	dev_err(gpu->dev, "recover hung GPU!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		goto pm_put;

	mutex_lock(&gpu->lock);

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock(&gpu->event_spinlock);
	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		complete(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	spin_unlock(&gpu->event_spinlock);

	etnaviv_gpu_hw_init(gpu);

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);
}
/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.signaled = etnaviv_fence_signaled,
	.release = etnaviv_fence_release,
};
static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}
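/*
 * The signed subtraction keeps the ordering correct across seqno
 * wrap-around: e.g. fence_after(1, 0xffffffff) computes (s32)(1 -
 * 0xffffffff) == 2 > 0, so seqno 1 is still seen as "after" 0xffffffff.
 */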
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	unsigned long timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0;

	for (i = 0; i < nr_events; i++) {
		unsigned long ret;

		ret = wait_for_completion_timeout(&gpu->event_free, timeout);

		if (!ret) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed");
			goto out;
		}

		acquired++;
		timeout = ret;
	}

	spin_lock(&gpu->event_spinlock);

	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);

		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}

	spin_unlock(&gpu->event_spinlock);

	return 0;

out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);

	return -EBUSY;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	if (!test_bit(event, gpu->event_bitmap)) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
	} else {
		clear_bit(event, gpu->event_bitmap);
		complete(&gpu->event_free);
	}
}
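/*
 * gpu->event_free acts as a counting semaphore: etnaviv_gpu_init() primes
 * it with ARRAY_SIZE(gpu->event) completions, event_alloc() consumes one
 * per acquired event and event_free() returns it.
 */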
/*
 * Cmdstream submission/retirement:
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct drm_etnaviv_timespec *timeout)
{
	struct dma_fence *fence;
	int ret;

	/*
	 * Look up the fence and take a reference. We might still find a fence
	 * whose refcount has already dropped to zero. dma_fence_get_rcu
	 * pretends we didn't find a fence in that case.
	 */
	rcu_read_lock();
	fence = idr_find(&gpu->fence_idr, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	if (!fence)
		return 0;

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}

	dma_fence_put(fence);
	return ret;
}
/*
 * Wait for an object to become inactive. This, on its own, is not race
 * free: the object is moved by the scheduler off the active list, and
 * then the iova is put. Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0)
		return 0;
	else if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	else
		return -ETIMEDOUT;
}
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}

static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;

	/* disable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);

	/* enable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}

static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		*pmr->bo_vma = pmr->sequence;
	}

	/* disable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	/* enable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
}
/* add bo's to gpu's ring, and kick gpu: */
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;

	if (!submit->runtime_resumed) {
		ret = pm_runtime_get_sync(gpu->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(gpu->dev);
			return NULL;
		}
		submit->runtime_resumed = true;
	}

	/*
	 * if there are performance monitor requests we need to have
	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
	 *   requests.
	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
	 *   and update the sequence number for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;

	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		pm_runtime_put_noidle(gpu->dev);
		return NULL;
	}

	mutex_lock(&gpu->lock);

	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);

		goto out_unlock;
	}

	if (!gpu->fe_running)
		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);
	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}

	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
			     event[0], &submit->cmdbuf);

	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}

out_unlock:
	mutex_unlock(&gpu->lock);

	return gpu_fence;
}
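/*
 * Event usage above: event[0] carries the GPU fence that is signalled from
 * the IRQ handler on job completion, while event[1] and event[2] (only
 * used with perfmon requests) bracket the job with the pre/post sample
 * sync points.
 */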
static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);

	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	u32 status_reg, status;
	int i;

	if (gpu->sec_mode == ETNA_SEC_NONE)
		status_reg = VIVS_MMUv2_STATUS;
	else
		status_reg = VIVS_MMUv2_SEC_STATUS;

	status = gpu_read(gpu, status_reg);
	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

	for (i = 0; i < 4; i++) {
		u32 address_reg;

		if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
			continue;

		if (gpu->sec_mode == ETNA_SEC_NONE)
			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
		else
			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;

		dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
				    gpu_read(gpu, address_reg));
	}
}
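/*
 * The MMUv2 status register packs four 4-bit exception fields, one per
 * MMU; the loop above checks each field in turn and dumps the matching
 * exception address when the field is non-zero.
 */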
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}

			fence = gpu->event[event].fence;
			if (!fence)
				continue;

			gpu->event[event].fence = NULL;

			/*
			 * Events can be processed out of order. Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);

			event_free(gpu, event);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = clk_prepare_enable(gpu->clk_reg);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gpu->clk_bus);
	if (ret)
		goto disable_clk_reg;

	ret = clk_prepare_enable(gpu->clk_core);
	if (ret)
		goto disable_clk_bus;

	ret = clk_prepare_enable(gpu->clk_shader);
	if (ret)
		goto disable_clk_core;

	return 0;

disable_clk_core:
	clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	clk_disable_unprepare(gpu->clk_reg);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	clk_disable_unprepare(gpu->clk_shader);
	clk_disable_unprepare(gpu->clk_core);
	clk_disable_unprepare(gpu->clk_bus);
	clk_disable_unprepare(gpu->clk_reg);

	return 0;
}
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->initialized && gpu->fe_running) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles). If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);

		gpu->fe_running = false;
	}

	gpu->exec_state = -1;

	return etnaviv_gpu_clk_disable(gpu);
}
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	mutex_unlock(&gpu->lock);

	return 0;
}
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};
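/*
 * Cooling state 0 means full clock; state 6 is the slowest step. This
 * mirrors the freq_scale handling in etnaviv_gpu_update_clock(), where
 * the core/shader rates are shifted right by freq_scale.
 */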
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}

	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		goto out_sched;

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	idr_init(&gpu->fence_idr);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

out_sched:
	etnaviv_sched_fini(gpu);

out_workqueue:
	destroy_workqueue(gpu->wq);

out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);

	return ret;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	flush_workqueue(gpu->wq);
	destroy_workqueue(gpu->wq);

	etnaviv_sched_fini(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);

	if (gpu->initialized) {
		etnaviv_cmdbuf_free(&gpu->buffer);
		etnaviv_iommu_global_fini(gpu);
		gpu->initialized = false;
	}

	gpu->drm = NULL;
	idr_destroy(&gpu->fence_idr);

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->fence_lock);

	/* Map registers: */
	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		return PTR_ERR(gpu->clk_reg);

	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		return PTR_ERR(gpu->clk_bus);

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		return PTR_ERR(gpu->clk_core);
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		return PTR_ERR(gpu->clk_shader);
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended. The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If there are any jobs in the HW queue, we're not idle */
	if (atomic_read(&gpu->sched.hw_rq_count))
		return -EBUSY;

	/* Check whether the hardware (except FE and MC) is idle */
	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
				  VIVS_HI_IDLE_STATE_MC);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask) {
		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
				     idle);
		return -EBUSY;
	}

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->initialized) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};