// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
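
/*
 * The GMU (Graphics Management Unit) is a small microcontroller (the CM3
 * core referenced by the register names below) that manages power and clock
 * state for the GPU on behalf of the host. The host talks to it through a
 * handful of shared registers, the HFI message queues and a set of
 * out-of-band (OOB) interrupt bits; the handlers and helpers in this file
 * implement that interface.
 */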

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
		tasklet_schedule(&gmu->hfi_tasklet);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
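
/*
 * Ask the GMU firmware to move the GPU to the frequency at 'index' in the
 * table sent during init: write the performance vote, kick the DCVS OOB so
 * the firmware notices it, then read back the firmware's return code.
 */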
static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
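
/*
 * Reset the CM3 core and wait for the firmware to signal that it booted.
 * Older firmware writes the magic value 0xbabeface to FW_INIT_RESULT;
 * newer versions (detected from the version word at the end of DTCM)
 * report a status in the low bits instead.
 */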
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		dev_err(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
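
/*
 * OOB (out of band) requests are a simple doorbell handshake: the host sets
 * a request bit in HOST2GMU_INTR_SET, waits for the matching ack bit to show
 * up in GMU2HOST_INTR_INFO, then clears the ack. Each request type has its
 * own request/ack/clear bit positions.
 */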
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		dev_err(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret)
		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
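
/*
 * Note the vote format used below: the GX arc index lives in the low byte
 * of the vote built by a6xx_gmu_rpmh_arc_votes_init() and the MX index in
 * the byte above it, which is why the two vote writes mask and shift the
 * way they do.
 */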
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			dev_err(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
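
/*
 * The RSC (resource state coordinator) is the hardware block that actually
 * sequences the GPU power domain on and off. The two helpers below poke its
 * control request register and wait for the sequencer to settle.
 */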
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (!ret) {
		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Re-enable the power counter */
		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
		return 0;
	}

	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
	return ret;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (0.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
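
/*
 * For reference: 0x1680 is 5760 cycles, which at the usual 19.2 MHz XO
 * works out to 5760 / 19200000 = 300 us; 0xa is 10 cycles, or roughly
 * 0.5 us.
 */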

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
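
/*
 * Bring the GMU firmware to a running state: on a cold boot this loads the
 * RPMh/PDC microcode and copies the firmware image into ITCM; on a warm
 * boot it only restarts the RSC. Either way it then configures the idle
 * state, boots the CM3, turns the GX rail on and starts the HFI queues.
 */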
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			dev_err(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
	 A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
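
/*
 * Last-ditch recovery: tear the GMU all the way down (including forcing the
 * GX GDSC off and waiting out any in-flight RPMh votes) and then bring it
 * back up with a GMU_RESET boot that skips the RSC wakeup sequence.
 */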
int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */

		if (ret)
			dev_err(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
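
/*
 * GMU shared memory is managed with a private IOMMU domain: buffers are
 * built from individually allocated pages mapped one by one into the GMU's
 * uncached window, and torn down the same way.
 */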
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}

static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			dev_err(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
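
/*
 * Note the write-combined vmap above: the GMU treats its shared IOVA window
 * as uncached, so the CPU-side mapping is made uncached/write-combined as
 * well to keep the two views coherent without explicit cache maintenance.
 */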
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Get the list of RPMh voltage levels from cmd-db */
static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
{
	u32 len = cmd_db_read_aux_data_len(id);

	if (!len)
		return 0;

	if (WARN_ON(len > size))
		return -EINVAL;

	cmd_db_read_aux_data(id, vals, len);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	return len >> 1;
}
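
/*
 * Each OPP in the device tree can carry an "opp-level" property naming the
 * RPMh voltage corner for that frequency; this is what gets matched against
 * the levels read back from cmd-db.
 */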
/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "opp-level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count,
		u16 *pri, int pri_count,
		u16 *sec, int sec_count)
{
	int i, j;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			dev_err(dev,
				"Level %u not found in the RPMh list\n",
				level);
			dev_err(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				dev_err(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */

		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */

static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	u16 gx[16], cx[16], mx[16];
	u32 gxcount, cxcount, mxcount;
	int ret;

	/* Get the list of available voltage levels for each component */
	gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
	cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
	mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs,
		gx, gxcount, mx, mxcount);

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs,
		cx, cxcount, mx, mxcount);

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");

	/* Map the GPU power domain controller registers */
	gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");

	if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Set up a tasklet to handle GMU HFI responses */
	tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}