/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

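/*
 * Answer a single ETNAVIV_PARAM_* query from userspace with the value
 * cached in gpu->identity by etnaviv_hw_identify()/etnaviv_hw_specs().
 */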
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

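/*
 * Read the raw HI_CHIP_SPECS registers (where the core advertises them)
 * and unpack the fields into gpu->identity; any field a core does not
 * report gets a conservative per-model default filled in below.
 */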
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

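/*
 * Determine the model/revision of the core, applying the quirks below
 * for parts whose ID registers disagree with their real identity, then
 * read out the feature and minor-feature words.
 */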
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model    = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

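/*
 * Program a new FSCALE value: the value is written with the
 * FSCALE_CMD_LOAD bit set and then written again with the bit clear,
 * which appears to latch the new divider into the hardware.
 */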
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			    VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);

		etnaviv_gpu_load_clock(gpu, clock);
	}
}

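/*
 * Soft-reset the core: isolate it, pulse the soft-reset bit, then check
 * that both the FE and the 2D/3D pipes report idle.  The whole sequence
 * is retried for up to a second before giving up.
 */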
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		etnaviv_gpu_update_clock(gpu);

		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

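/*
 * Enable module-level clock gating (MLCG), with per-module gating
 * disabled again on the cores/revisions where it is known to be broken.
 */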
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

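/*
 * Program the pulse eater, a power-management block whose default
 * configuration apparently needs model/revision specific tweaks to
 * avoid hangs and performance problems.
 */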
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
			     prefetch);
}

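/*
 * One-time bring-up: identify the core, reset it, set up the MMU and
 * ring command buffer, then program the hardware.  Runs with a runtime
 * PM reference held; errors unwind in reverse order of setup.
 */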
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);

		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto destroy_iommu;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

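/* Debugfs support: dump identity, specs, idle state and FE DMA progress. */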
#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		dma_fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

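/*
 * The hangcheck timer fires periodically while work is pending and
 * declares the GPU hung only when neither the completed fence seqno nor
 * the FE DMA address has made progress since the last check.
 */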
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive, bool explicit)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	if (explicit)
		return 0;

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

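/*
 * Event management: the completion acts as a counting semaphore over
 * the fixed-size gpu->event[] array, so event_alloc() can sleep until
 * event_free() returns a slot.
 */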
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!dma_fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		dma_fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission.  Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed submission.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

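/*
 * Wait for a specific fence seqno to complete.  With no timeout this is
 * a non-blocking poll; otherwise it sleeps on gpu->fence_event, which
 * the retire worker wakes whenever fences complete.
 */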
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
				fence, gpu->retired_fence,
				gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct dma_fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_pm_put;
	}

	mutex_lock(&gpu->lock);

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = dma_fence_get(fence);
	gpu->active_fence = submit->fence->seqno;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->flush_seq++;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

out_pm_put:
	etnaviv_gpu_pm_put(gpu);

	return ret;
}

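/*
 * Interrupt handling: acknowledge the raised interrupt bits, report AXI
 * bus errors and MMU faults, then signal the fence attached to each
 * completed event and kick the retire worker.
 */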
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			int i;

			dev_err_ratelimited(gpu->dev,
				"MMU fault status 0x%08x\n",
				gpu_read(gpu, VIVS_MMUv2_STATUS));
			for (i = 0; i < 4; i++) {
				dev_err_ratelimited(gpu->dev,
					"MMU %d fault addr 0x%08x\n",
					i, gpu_read(gpu,
					VIVS_MMUv2_EXCEPTION_ADDR(i)));
			}
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			dma_fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

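/*
 * Clock handling: all three clocks are optional (probe turns a missing
 * clock into a NULL pointer), so each is only touched when present.
 */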
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			return ret;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

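/*
 * Counterpart to etnaviv_gpu_hw_suspend(): reprogram the clock and redo
 * the basic hardware initialisation under the GPU lock.
 */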
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}

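/*
 * Thermal cooling device callbacks: the cooling "state" maps directly
 * onto gpu->freq_scale, so higher states throttle the clock via
 * etnaviv_gpu_update_clock().
 */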
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0) {
		if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
			thermal_cooling_device_unregister(gpu->cooling);
		return ret;
	}

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
			       (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

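/*
 * Platform driver glue: map the MMIO region, request the IRQ, look up
 * the (optional) clocks and register the component; the device starts
 * runtime-suspended until the component is bound.
 */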
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};