/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

#define GPU_PAS_ID 13
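/*
 * The "zap" shader is a small signed firmware image that the secure world
 * loads and authenticates on behalf of the GPU. Once it has run, the CP is
 * allowed to take the GPU out of secure mode with a CP_SET_SECURE_MODE
 * packet; without it the driver has to fall back to writing
 * RBBM_SECVID_TRUST_CNTL directly (see a5xx_hw_init() below).
 */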
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
	const struct firmware *fw;
	struct device_node *np, *mem_np;
	struct resource r;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
		return -EINVAL;

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np)
		return -ENODEV;

	mem_np = of_parse_phandle(np, "memory-region", 0);
	if (!mem_np)
		return -EINVAL;

	ret = of_address_to_resource(mem_np, 0, &r);
	if (ret)
		return ret;

	mem_phys = r.start;
	mem_size = resource_size(&r);

	/* Request the MDT file for the firmware */
	ret = request_firmware(&fw, fwname, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
		return ret;
	}

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);

	/* Allocate memory for the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	/* Load the rest of the MDT */
	ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
		mem_size);
	if (ret)
		goto out;

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
	if (ret)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	if (mem_region)
		memunmap(mem_region);
	release_firmware(fw);

	return ret;
}
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned int i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->fence->seqno);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, submit->fence->seqno);

	gpu->funcs->flush(gpu);
}
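/*
 * Hardware clock gating configuration: each entry below is a register/value
 * pair that a5xx_set_hwcg() writes to enable clock gating for a block (SP,
 * TP, RB, CCU, UCHE, RAC, etc.). Writing zero instead disables the gating.
 */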
static const struct {
	u32 offset;
	u32 value;
} a5xx_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
		gpu_write(gpu, a5xx_hwcg[i].offset,
			state ? a5xx_hwcg[i].value : 0);

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);

	return a5xx_idle(gpu) ? 0 : -EINVAL;
}
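/*
 * The PM4 and PFP microcode images are copied into GPU-readable buffer
 * objects; their GPU addresses are later programmed into CP_ME_INSTR_BASE
 * and CP_PFP_INSTR_BASE by a5xx_ucode_init().
 */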
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);
	return bo;
}
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
			&a5xx_gpu->pm4_iova);

		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
				ret);
			return ret;
		}
	}

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
			&a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
				ret);
			return ret;
		}
	}

	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

	return 0;
}
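/*
 * Once the zap shader has been loaded, a later power collapse only requires
 * asking the secure world to re-run it; the "set remote state" SCM call
 * below does exactly that without reloading the MDT image.
 */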
#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
	int ret;

	ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
	if (ret)
		DRM_ERROR("%s: zap-shader resume failed: %d\n",
			gpu->name, ret);

	return ret;
}
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;
	int ret;

	/*
	 * If the zap shader is already loaded into memory we just need to kick
	 * the remote processor to reinitialize it
	 */
	if (loaded)
		return a5xx_zap_shader_resume(gpu);

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	/* Each GPU has a target specific zap shader firmware name to use */
	if (!adreno_gpu->info->zapfw) {
		DRM_DEV_ERROR(&pdev->dev,
			"Zap shader firmware file not specified for this target\n");
		return -ENODEV;
	}

	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);

	loaded = !ret;

	return ret;
}
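/*
 * Interrupts unmasked at hw_init time: the various error and hang-detect
 * sources plus CP_CACHE_FLUSH_TS, which fires once a submit's fence value
 * has been written back and is used to retire completed submits.
 */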
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	  A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
static int a5xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
		 */
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
			0xF0000000);
	}

	/* Enable fault detection */
	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0xffff);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
	/* Increase VFD cache access so LRZ and other data gets evicted less */
	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

	/* Set the GMEM VA range (0 to gpu->gmem) */
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
		0x00100000 + adreno_gpu->gmem - 1);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* Enable USE_RETENTION_FLOPS */
	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

	/* Enable ME/PFP split notification */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
	/* Enable HWCG */
	a5xx_set_hwcg(gpu, true);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

	/* Content protect */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

	/* CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

	/* RB */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

	/* VPC */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

	/* UCHE */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
			ADRENO_PROTECT_RW(0x10000, 0x8000));
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Load the GPMU firmware before starting the HW init */
	a5xx_gpmu_ucode_init(gpu);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_ucode_init(gpu);
	if (ret)
		return ret;

	/* Disable the interrupts through the initial bringup stage */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

	/* Clear ME_HALT to start the micro engine */
	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
	ret = a5xx_me_init(gpu);
	if (ret)
		return ret;
	ret = a5xx_power_init(gpu);
	if (ret)
		return ret;

	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly
	 */
	if (adreno_is_a530(adreno_gpu)) {
		OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
		OUT_RING(gpu->rb, 0x0F);

		gpu->funcs->flush(gpu);
		if (!a5xx_idle(gpu))
			return -EINVAL;
	}

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a5xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb, 0x00000000);

		gpu->funcs->flush(gpu);
		if (!a5xx_idle(gpu))
			return -EINVAL;
	} else {
		/* Print a warning so if we die, we know why */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
	}

	return 0;
}
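/*
 * Recovery path: dump some CP state for debugging, pulse the RBBM software
 * reset and then hand off to the common adreno recovery code to
 * reinitialize the GPU.
 */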
static void a5xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
	}

	if (hang_debug)
		a5xx_dump(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}
static void a5xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		if (a5xx_gpu->pfp_iova)
			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}
static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
		return false;

	/*
	 * Nearly every abnormality ends up pausing the GPU and triggering a
	 * fault so we can safely just watch for this one interrupt to fire
	 */
	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}
bool a5xx_idle(struct msm_gpu *gpu)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu))
		return false;

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
		return false;
	}

	return true;
}
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
		 * read it twice
		 */
		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

	if (status & A5XX_CP_INT_CP_DMA_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");

	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 24) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, val);
	}

	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
		const char *access[16] = { "reserved", "reserved",
			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
			"", "", "me read", "me write", "", "", "crashdump read",
			"crashdump write" };

		dev_err_ratelimited(gpu->dev->dev,
			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
			status & 0xFFFFF, access[(status >> 24) & 0xF],
			(status & (1 << 31)), status);
	}
}
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
			(val >> 24) & 0xF);

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}
static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);

	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
		addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
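/*
 * The MISC_HANG_DETECT interrupt fires when the RBBM interface hang
 * detection counters expire; log the current CP state and kick off the
 * recovery worker.
 */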
static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		gpu->funcs->last_fence(gpu),
		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}
#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
	 * before the source is cleared the interrupt will storm.
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
		a5xx_fault_detect_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}
static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A5XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};
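/*
 * Ranges of registers (inclusive start/end pairs) that the common adreno
 * code reads back for debugfs output and post-hang register dumps.
 */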
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
	0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
{
	dev_info(gpu->dev->dev, "status:   %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_dump(gpu);
}
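/*
 * Power-up sequencing: bring up the RBCCU power domain before the SP domain
 * to limit voltage droop, polling the GPMU clock status bits to confirm
 * each GDSC is enabled before moving on.
 */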
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Turn the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issue with FIFO
	 * entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	return msm_gpu_pm_suspend(gpu);
}
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
		REG_A5XX_RBBM_PERFCTR_CP_0_HI);

	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "status:   %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));

	/*
	 * Temporarily disable hardware clock gating before going into
	 * adreno_show to avoid issues while reading the registers
	 */
	a5xx_set_hwcg(gpu, false);
	adreno_show(gpu, m);
	a5xx_set_hwcg(gpu, true);
}
#endif
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a5xx_hw_init,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.last_fence = adreno_last_fence,
		.submit = a5xx_submit,
		.flush = adreno_flush,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a5xx_show,
#endif
	},
	.get_timestamp = a5xx_get_timestamp,
};
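/*
 * Probe-time constructor: allocates the a5xx_gpu wrapper, points the common
 * adreno layer at the A5XX register tables and callbacks, and installs the
 * IOMMU fault handler on the GPU address space.
 */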
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);
	}

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
	if (!a5xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	a5xx_gpu->lm_leakage = 0x4E001A;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

	return gpu;
}