/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
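/* GDS register offsets, one row per VMID: the GDS memory partition
 * base/size plus the GWS and OA allocation registers, so per-client GDS
 * state can be programmed by VMID index. */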
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
	SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};
static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
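
/* Helper: emit a PM4 WRITE_DATA packet that writes @val to register @reg
 * from the selected micro engine, optionally with a write confirm so the
 * packet only retires once the write has landed. */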
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
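
/* Helper: emit a PM4 WAIT_REG_MEM packet that stalls the selected engine
 * until the dword at a register or memory address, ANDed with @mask,
 * equals @ref; @inv is the re-poll interval. */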
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
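
/* Ring sanity test: push a SET_UCONFIG_REG write of a magic value to a
 * scratch register through the ring, then poll until the CP has consumed
 * the packet or the usec timeout expires. */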
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
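
/* IB sanity test: the same scratch-register write, but submitted through
 * an indirect buffer and synchronized with a fence, which also exercises
 * the scheduler and fence paths. */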
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
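
/* Fetch and validate all GFX microcode images (PFP, ME, CE, RLC, MEC and
 * optionally MEC2), cache their version/feature numbers, and register each
 * image with the PSP front-door loader when that load type is selected.
 * Note: in this linux-libre tree the firmware names are deblobbed and
 * reject_firmware() stands in for request_firmware(). */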
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
		le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
		le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}
539 "gfx9: Failed to load firmware \"%s\"\n",
541 release_firmware(adev->gfx.pfp_fw);
542 adev->gfx.pfp_fw = NULL;
543 release_firmware(adev->gfx.me_fw);
544 adev->gfx.me_fw = NULL;
545 release_firmware(adev->gfx.ce_fw);
546 adev->gfx.ce_fw = NULL;
547 release_firmware(adev->gfx.rlc_fw);
548 adev->gfx.rlc_fw = NULL;
549 release_firmware(adev->gfx.mec_fw);
550 adev->gfx.mec_fw = NULL;
551 release_firmware(adev->gfx.mec2_fw);
552 adev->gfx.mec2_fw = NULL;
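
/* Size, in dwords, of the clear-state buffer built below: begin/end
 * preamble packets, a CONTEXT_CONTROL packet, one SET_CONTEXT_REG packet
 * per SECT_CONTEXT extent of the generated gfx9 clear-state table, and a
 * trailing CLEAR_STATE packet. */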
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
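
/* Program the RLC load-balancing thresholds, counters and CU masks used
 * by LBPW (load balancing per watt); the magic values below are tuned
 * hardware settings written as-is. */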
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, bit 31 is reserved
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
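
/* Pack the jump tables of the CE, PFP, ME, MEC and MEC2 images
 * back-to-back into the RLC cp_table BO; on Raven the RLC presumably
 * replays these tables when it power-gates and re-powers the CP. */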
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}
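
/* MEC buffer management: one GFX9_MEC_HPD_SIZE EOP slot per enabled
 * compute ring in a shared GTT BO, plus a GTT copy of the MEC microcode
 * used during CP init. */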
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
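
/* The SQ exposes per-wave state through an indexed register pair:
 * SQ_IND_INDEX selects SIMD/wave/address (optionally auto-incrementing)
 * and SQ_IND_DATA returns the value. Used by the wave debug readers
 * below. */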
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}
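
/* NGG scratch buffers: one VRAM BO per buffer type, sized per shader
 * engine from the corresponding module parameter, falling back to a
 * built-in default when the parameter is zero. */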
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data;
	u32 size;
	u32 base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = 0;
	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = 0;
	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}
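
/* One amdgpu_ring per enabled MEC queue: me/pipe/queue name the hardware
 * slot, while the doorbell index and EOP address are derived from the
 * flat ring_id so every ring gets a unique doorbell and its own
 * GFX9_MEC_HPD_SIZE slice of the shared EOP buffer. */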
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);

	return 0;
}
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	if (adev->gfx.num_gfx_rings)
		gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
			adev->gfx.rlc.clear_state_size);
}
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
				int indirect_offset,
				int list_size,
				int *unique_indirect_regs,
				int *unique_indirect_reg_count,
				int max_indirect_reg_count,
				int *indirect_start_offsets,
				int *indirect_start_offsets_count,
				int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching indice */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
				register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}
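
/* Program the RLC save/restore machine from the lists parsed out of the
 * RLC firmware: the restore table goes to SRM ARAM, the indirect list and
 * its starting offsets go to RLC GPM scratch, and up to eight unique
 * indirect register addresses land in the SRM index control pairs. */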
static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int unique_indirect_reg_count = 0;

	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int indirect_start_offsets_count = 0;

	int list_size = 0;
	int i = 0;
	u32 tmp = 0;

	u32 *register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;
	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
		adev->gfx.rlc.reg_list_format_size_bytes);

	/* setup unique_indirect_regs array and indirect_start_offsets array */
	gfx_v9_0_parse_ind_reg_list(register_list_format,
				GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				unique_indirect_regs,
				&unique_indirect_reg_count,
				sizeof(unique_indirect_regs)/sizeof(int),
				indirect_start_offsets,
				&indirect_start_offsets_count,
				sizeof(indirect_start_offsets)/sizeof(int));

	/* enable auto inc in case it is disabled */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);

	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
			adev->gfx.rlc.register_restore[i]);

	/* load direct register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
			adev->gfx.rlc.register_restore[i]);

	/* load indirect register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
			register_list_format[i]);

	/* set save/restore list size */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.reg_restore_list_size);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);

	/* write the starting offsets to RLC scratch ram */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
			indirect_start_offsets[i]);

	/* load unique indirect regs*/
	for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
			unique_indirect_regs[i] & 0x3FFFF);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
			unique_indirect_regs[i] >> 20);
	}

	kfree(register_list_format);
	return 0;
}
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	uint32_t tmp = 0;

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
	if (enable == true) {
		/* enable GFXIP control over CGPG */
		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);

		/* update gfxoff status */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	} else {
		/* restore GFXIP control over CGPG */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	}
}
static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG)) {
		/* init IDLE_POLL_COUNT = 60 */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);

		/* init RLC PG Delay */
		data = 0;
		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;

		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);

		pwr_10_0_gfxip_control_over_cgpg(adev, true);
	}
}
1788 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1792 uint32_t default_data = 0;
1794 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1796 if (enable) {
1797 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1798 if (default_data != data)
1799 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1801 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1802 if (default_data != data)
1803 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1807 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1811 uint32_t default_data = 0;
1813 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1815 if (enable) {
1816 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1817 if (default_data != data)
1818 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1820 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1821 if (default_data != data)
1822 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1826 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1830 uint32_t default_data = 0;
1832 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1834 if (enable) {
1835 data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1836 if (default_data != data)
1837 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1839 data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1840 if (default_data != data)
1841 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1845 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1848 uint32_t data, default_data;
1850 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1852 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1854 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1855 if (default_data != data)
1856 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1859 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1862 uint32_t data, default_data;
1864 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1866 data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1868 data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1869 if (default_data != data)
1870 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1873 /* read any GFX register to wake up GFX */
1874 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
1877 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
1880 uint32_t data, default_data;
1882 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1884 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1886 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1887 if (default_data != data)
1888 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1891 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
1894 uint32_t data, default_data;
1896 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1898 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1900 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1901 if (default_data != data)
1902 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
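/* One-time power-gating init: set up the clear-state buffer, the RLC
 * save/restore list and the save/restore machine; Raven additionally
 * programs the RLC jump table and the full gfx power-gating setup. */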
1905 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
1907 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1908 AMD_PG_SUPPORT_GFX_SMG |
1909 AMD_PG_SUPPORT_GFX_DMG |
1911 AMD_PG_SUPPORT_GDS |
1912 AMD_PG_SUPPORT_RLC_SMU_HS)) {
1913 gfx_v9_0_init_csb(adev);
1914 gfx_v9_0_init_rlc_save_restore_list(adev);
1915 gfx_v9_0_enable_save_restore_machine(adev);
1917 if (adev->asic_type == CHIP_RAVEN) {
1918 WREG32(mmRLC_JUMP_TABLE_RESTORE,
1919 adev->gfx.rlc.cp_table_gpu_addr >> 8);
1920 gfx_v9_0_init_gfx_power_gating(adev);
1922 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
1923 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
1924 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
1926 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
1927 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
1930 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
1931 gfx_v9_0_enable_cp_power_gating(adev, true);
1933 gfx_v9_0_enable_cp_power_gating(adev, false);
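/* Halt the RLC F32 core, mask the GUI idle interrupt and wait for the
 * RLC SerDes to become idle. */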
1938 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
1940 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1942 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1943 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1945 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
1947 gfx_v9_0_wait_for_rlc_serdes(adev);
1950 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
1952 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1954 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1958 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
1960 #ifdef AMDGPU_RLC_DEBUG_RETRY
1964 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1967 /* APUs (e.g. carrizo) enable the cp interrupt only after cp is inited */
1968 if (!(adev->flags & AMD_IS_APU)) {
1969 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
1973 #ifdef AMDGPU_RLC_DEBUG_RETRY
1974 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
1975 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
1976 if (rlc_ucode_ver == 0x108) {
1977 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1978 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1979 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1980 * default is 0x9C4 to create a 100us interval */
1981 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
1982 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1983 * to disable the page fault retry interrupts, default is 0x100 (256) */
1985 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
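/* Legacy (non-PSP) RLC microcode load: stream the ucode image one dword
 * at a time through the RLC_GPM_UCODE_ADDR/DATA register pair. */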
1990 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
1992 const struct rlc_firmware_header_v2_0 *hdr;
1993 const __le32 *fw_data;
1994 unsigned i, fw_size;
1996 if (!adev->gfx.rlc_fw)
1999 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2000 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2002 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2003 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2004 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2006 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2007 RLCG_UCODE_LOADING_START_ADDRESS);
2008 for (i = 0; i < fw_size; i++)
2009 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2010 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2015 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2019 if (amdgpu_sriov_vf(adev))
2022 gfx_v9_0_rlc_stop(adev);
2025 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2028 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2030 gfx_v9_0_rlc_reset(adev);
2032 gfx_v9_0_init_pg(adev);
2034 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2035 /* legacy rlc firmware loading */
2036 r = gfx_v9_0_rlc_load_microcode(adev);
2041 if (adev->asic_type == CHIP_RAVEN) {
2042 if (amdgpu_lbpw != 0)
2043 gfx_v9_0_enable_lbpw(adev, true);
2045 gfx_v9_0_enable_lbpw(adev, false);
2048 gfx_v9_0_rlc_start(adev);
2053 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2056 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2058 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2059 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2060 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2062 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2063 adev->gfx.gfx_ring[i].ready = false;
2065 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2069 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2071 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2072 const struct gfx_firmware_header_v1_0 *ce_hdr;
2073 const struct gfx_firmware_header_v1_0 *me_hdr;
2074 const __le32 *fw_data;
2075 unsigned i, fw_size;
2077 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2080 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2081 adev->gfx.pfp_fw->data;
2082 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2083 adev->gfx.ce_fw->data;
2084 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2085 adev->gfx.me_fw->data;
2087 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2088 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2089 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2091 gfx_v9_0_cp_gfx_enable(adev, false);
2094 fw_data = (const __le32 *)
2095 (adev->gfx.pfp_fw->data +
2096 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2097 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2098 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2099 for (i = 0; i < fw_size; i++)
2100 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2101 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2104 fw_data = (const __le32 *)
2105 (adev->gfx.ce_fw->data +
2106 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2107 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2108 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2109 for (i = 0; i < fw_size; i++)
2110 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2111 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2114 fw_data = (const __le32 *)
2115 (adev->gfx.me_fw->data +
2116 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2117 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2118 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2119 for (i = 0; i < fw_size; i++)
2120 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2121 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
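/* Bring up the gfx ring: enable the CP, then emit the clear-state
 * buffer and initial context/uconfig state as PM4 packets. */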
2126 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2128 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2129 const struct cs_section_def *sect = NULL;
2130 const struct cs_extent_def *ext = NULL;
2134 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2135 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2137 gfx_v9_0_cp_gfx_enable(adev, true);
2139 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2141 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2145 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2146 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2148 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2149 amdgpu_ring_write(ring, 0x80000000);
2150 amdgpu_ring_write(ring, 0x80000000);
2152 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2153 for (ext = sect->section; ext->extent != NULL; ++ext) {
2154 if (sect->id == SECT_CONTEXT) {
2155 amdgpu_ring_write(ring,
2156 PACKET3(PACKET3_SET_CONTEXT_REG,
2158 amdgpu_ring_write(ring,
2159 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2160 for (i = 0; i < ext->reg_count; i++)
2161 amdgpu_ring_write(ring, ext->extent[i]);
2166 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2167 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2169 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2170 amdgpu_ring_write(ring, 0);
2172 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2173 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2174 amdgpu_ring_write(ring, 0x8000);
2175 amdgpu_ring_write(ring, 0x8000);
2177 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2178 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2179 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2180 amdgpu_ring_write(ring, tmp);
2181 amdgpu_ring_write(ring, 0);
2183 amdgpu_ring_commit(ring);
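/* Program the gfx ring registers (buffer size, read/write pointers,
 * base address, doorbell range) and then start the ring. */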
2188 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2190 struct amdgpu_ring *ring;
2193 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2195 /* Set the write pointer delay */
2196 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2198 /* set the RB to use vmid 0 */
2199 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2201 /* Set ring buffer size */
2202 ring = &adev->gfx.gfx_ring[0];
2203 rb_bufsz = order_base_2(ring->ring_size / 8);
2204 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2205 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2207 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2209 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2211 /* Initialize the ring buffer's write pointers */
2213 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2214 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2216 /* set the wb address whether it's enabled or not */
2217 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2218 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2219 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2221 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2222 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2223 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2226 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2228 rb_addr = ring->gpu_addr >> 8;
2229 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2230 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2232 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2233 if (ring->use_doorbell) {
2234 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2235 DOORBELL_OFFSET, ring->doorbell_index);
2236 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2239 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2241 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2243 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2244 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2245 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2247 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2248 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2251 /* start the ring */
2252 gfx_v9_0_cp_gfx_start(adev);
2258 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2263 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2265 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2266 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2267 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2268 adev->gfx.compute_ring[i].ready = false;
2269 adev->gfx.kiq.ring.ready = false;
2274 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2276 const struct gfx_firmware_header_v1_0 *mec_hdr;
2277 const __le32 *fw_data;
2281 if (!adev->gfx.mec_fw)
2284 gfx_v9_0_cp_compute_enable(adev, false);
2286 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2287 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2289 fw_data = (const __le32 *)
2290 (adev->gfx.mec_fw->data +
2291 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2292 tmp = 0;
2293 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2294 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2295 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2297 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2298 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2299 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2300 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2303 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2304 mec_hdr->jt_offset);
2305 for (i = 0; i < mec_hdr->jt_size; i++)
2306 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2307 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2309 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2310 adev->gfx.mec_fw_version);
2311 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
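/* Tell the RLC which me/pipe/queue combination is the KIQ by
 * programming RLC_CP_SCHEDULERS. */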
2317 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2320 struct amdgpu_device *adev = ring->adev;
2322 /* tell RLC which is KIQ queue */
2323 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2324 tmp &= 0xffffff00;
2325 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2326 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2327 tmp |= 0x80;
2328 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
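/* Map all compute queues through the KIQ: emit one SET_RESOURCES packet
 * with the queue mask, then one MAP_QUEUES packet per compute ring, and
 * poll a scratch register to confirm the KIQ processed the submission. */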
2331 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2333 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2334 uint32_t scratch, tmp = 0;
2335 uint64_t queue_mask = 0;
2338 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2339 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2342 /* This situation may be hit in the future if a new HW
2343 * generation exposes more than 64 queues. If so, the
2344 * definition of queue_mask needs updating */
2345 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2346 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2350 queue_mask |= (1ull << i);
2353 r = amdgpu_gfx_scratch_get(adev, &scratch);
2355 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2358 WREG32(scratch, 0xCAFEDEAD);
2360 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2362 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2363 amdgpu_gfx_scratch_free(adev, scratch);
2368 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2369 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2370 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2371 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2372 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2373 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2374 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2375 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2376 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2377 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2378 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2379 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2380 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2382 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2383 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2384 amdgpu_ring_write(kiq_ring,
2385 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2386 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2387 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2388 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2389 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2390 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2391 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
2392 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2393 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2394 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2395 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2396 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2397 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2398 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2400 /* write to scratch for completion */
2401 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2402 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2403 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2404 amdgpu_ring_commit(kiq_ring);
2406 for (i = 0; i < adev->usec_timeout; i++) {
2407 tmp = RREG32(scratch);
2408 if (tmp == 0xDEADBEEF)
2412 if (i >= adev->usec_timeout) {
2413 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2417 amdgpu_gfx_scratch_free(adev, scratch);
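/* Fill the memory queue descriptor (MQD) for this ring with the values
 * that will later be committed to the CP_HQD_* registers. */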
2422 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2424 struct amdgpu_device *adev = ring->adev;
2425 struct v9_mqd *mqd = ring->mqd_ptr;
2426 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2429 mqd->header = 0xC0310800;
2430 mqd->compute_pipelinestat_enable = 0x00000001;
2431 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2432 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2433 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2434 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2435 mqd->compute_misc_reserved = 0x00000003;
2437 eop_base_addr = ring->eop_gpu_addr >> 8;
2438 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2439 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2441 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2442 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2443 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2444 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2446 mqd->cp_hqd_eop_control = tmp;
2448 /* enable doorbell? */
2449 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2451 if (ring->use_doorbell) {
2452 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2453 DOORBELL_OFFSET, ring->doorbell_index);
2454 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2456 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2457 DOORBELL_SOURCE, 0);
2458 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2462 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2465 mqd->cp_hqd_pq_doorbell_control = tmp;
2467 /* disable the queue if it's active */
2469 mqd->cp_hqd_dequeue_request = 0;
2470 mqd->cp_hqd_pq_rptr = 0;
2471 mqd->cp_hqd_pq_wptr_lo = 0;
2472 mqd->cp_hqd_pq_wptr_hi = 0;
2474 /* set the pointer to the MQD */
2475 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2476 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2478 /* set MQD vmid to 0 */
2479 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2480 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2481 mqd->cp_mqd_control = tmp;
2483 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2484 hqd_gpu_addr = ring->gpu_addr >> 8;
2485 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2486 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2488 /* set up the HQD, this is similar to CP_RB0_CNTL */
2489 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2490 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2491 (order_base_2(ring->ring_size / 4) - 1));
2492 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2493 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2495 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2497 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2498 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2499 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2500 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2501 mqd->cp_hqd_pq_control = tmp;
2503 /* set the wb address whether it's enabled or not */
2504 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2505 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2506 mqd->cp_hqd_pq_rptr_report_addr_hi =
2507 upper_32_bits(wb_gpu_addr) & 0xffff;
2509 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2510 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2511 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2512 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2515 /* enable the doorbell if requested */
2516 if (ring->use_doorbell) {
2517 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2518 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2519 DOORBELL_OFFSET, ring->doorbell_index);
2521 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2523 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2524 DOORBELL_SOURCE, 0);
2525 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2529 mqd->cp_hqd_pq_doorbell_control = tmp;
2531 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2533 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2535 /* set the vmid for the queue */
2536 mqd->cp_hqd_vmid = 0;
2538 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2539 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2540 mqd->cp_hqd_persistent_state = tmp;
2542 /* set MIN_IB_AVAIL_SIZE */
2543 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2544 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2545 mqd->cp_hqd_ib_control = tmp;
2547 /* activate the queue */
2548 mqd->cp_hqd_active = 1;
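/* Commit the MQD contents to the HQD registers directly via MMIO;
 * the queue is dequeued first if it happens to be active. */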
2553 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2555 struct amdgpu_device *adev = ring->adev;
2556 struct v9_mqd *mqd = ring->mqd_ptr;
2559 /* disable wptr polling */
2560 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2562 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2563 mqd->cp_hqd_eop_base_addr_lo);
2564 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2565 mqd->cp_hqd_eop_base_addr_hi);
2567 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2568 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2569 mqd->cp_hqd_eop_control);
2571 /* enable doorbell? */
2572 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2573 mqd->cp_hqd_pq_doorbell_control);
2575 /* disable the queue if it's active */
2576 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2577 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2578 for (j = 0; j < adev->usec_timeout; j++) {
2579 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2583 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2584 mqd->cp_hqd_dequeue_request);
2585 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2586 mqd->cp_hqd_pq_rptr);
2587 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2588 mqd->cp_hqd_pq_wptr_lo);
2589 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2590 mqd->cp_hqd_pq_wptr_hi);
2593 /* set the pointer to the MQD */
2594 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2595 mqd->cp_mqd_base_addr_lo);
2596 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2597 mqd->cp_mqd_base_addr_hi);
2599 /* set MQD vmid to 0 */
2600 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2601 mqd->cp_mqd_control);
2603 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2604 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2605 mqd->cp_hqd_pq_base_lo);
2606 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2607 mqd->cp_hqd_pq_base_hi);
2609 /* set up the HQD, this is similar to CP_RB0_CNTL */
2610 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2611 mqd->cp_hqd_pq_control);
2613 /* set the wb address whether it's enabled or not */
2614 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2615 mqd->cp_hqd_pq_rptr_report_addr_lo);
2616 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2617 mqd->cp_hqd_pq_rptr_report_addr_hi);
2619 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2620 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2621 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2622 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2623 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2625 /* enable the doorbell if requested */
2626 if (ring->use_doorbell) {
2627 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2628 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2629 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2630 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2633 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2634 mqd->cp_hqd_pq_doorbell_control);
2636 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2637 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2638 mqd->cp_hqd_pq_wptr_lo);
2639 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2640 mqd->cp_hqd_pq_wptr_hi);
2642 /* set the vmid for the queue */
2643 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2645 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2646 mqd->cp_hqd_persistent_state);
2648 /* activate the queue */
2649 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2650 mqd->cp_hqd_active);
2652 if (ring->use_doorbell)
2653 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2658 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2660 struct amdgpu_device *adev = ring->adev;
2661 struct v9_mqd *mqd = ring->mqd_ptr;
2662 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2664 gfx_v9_0_kiq_setting(ring);
2666 if (adev->gfx.in_reset) { /* for GPU_RESET case */
2667 /* reset MQD to a clean status */
2668 if (adev->gfx.mec.mqd_backup[mqd_idx])
2669 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2671 /* reset ring buffer */
2673 amdgpu_ring_clear_ring(ring);
2675 mutex_lock(&adev->srbm_mutex);
2676 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2677 gfx_v9_0_kiq_init_register(ring);
2678 soc15_grbm_select(adev, 0, 0, 0, 0);
2679 mutex_unlock(&adev->srbm_mutex);
2681 memset((void *)mqd, 0, sizeof(*mqd));
2682 mutex_lock(&adev->srbm_mutex);
2683 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2684 gfx_v9_0_mqd_init(ring);
2685 gfx_v9_0_kiq_init_register(ring);
2686 soc15_grbm_select(adev, 0, 0, 0, 0);
2687 mutex_unlock(&adev->srbm_mutex);
2689 if (adev->gfx.mec.mqd_backup[mqd_idx])
2690 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2696 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2698 struct amdgpu_device *adev = ring->adev;
2699 struct v9_mqd *mqd = ring->mqd_ptr;
2700 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2702 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
2703 memset((void *)mqd, 0, sizeof(*mqd));
2704 mutex_lock(&adev->srbm_mutex);
2705 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2706 gfx_v9_0_mqd_init(ring);
2707 soc15_grbm_select(adev, 0, 0, 0, 0);
2708 mutex_unlock(&adev->srbm_mutex);
2710 if (adev->gfx.mec.mqd_backup[mqd_idx])
2711 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2712 } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
2713 /* reset MQD to a clean status */
2714 if (adev->gfx.mec.mqd_backup[mqd_idx])
2715 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2717 /* reset ring buffer */
2719 amdgpu_ring_clear_ring(ring);
2721 amdgpu_ring_clear_ring(ring);
2727 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2729 struct amdgpu_ring *ring = NULL;
2732 gfx_v9_0_cp_compute_enable(adev, true);
2734 ring = &adev->gfx.kiq.ring;
2736 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2737 if (unlikely(r != 0))
2740 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2742 r = gfx_v9_0_kiq_init_queue(ring);
2743 amdgpu_bo_kunmap(ring->mqd_obj);
2744 ring->mqd_ptr = NULL;
2746 amdgpu_bo_unreserve(ring->mqd_obj);
2750 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2751 ring = &adev->gfx.compute_ring[i];
2753 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2754 if (unlikely(r != 0))
2756 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2758 r = gfx_v9_0_kcq_init_queue(ring);
2759 amdgpu_bo_kunmap(ring->mqd_obj);
2760 ring->mqd_ptr = NULL;
2762 amdgpu_bo_unreserve(ring->mqd_obj);
2767 r = gfx_v9_0_kiq_kcq_enable(adev);
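/* Full CP bring-up: load gfx and compute microcode if the PSP isn't
 * doing it, resume the gfx ring and the KIQ/compute queues, then
 * ring-test everything. */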
2772 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2775 struct amdgpu_ring *ring;
2777 if (!(adev->flags & AMD_IS_APU))
2778 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2780 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2781 /* legacy firmware loading */
2782 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2786 r = gfx_v9_0_cp_compute_load_microcode(adev);
2791 r = gfx_v9_0_cp_gfx_resume(adev);
2795 r = gfx_v9_0_kiq_resume(adev);
2799 ring = &adev->gfx.gfx_ring[0];
2800 r = amdgpu_ring_test_ring(ring);
2802 ring->ready = false;
2806 ring = &adev->gfx.kiq.ring;
2808 r = amdgpu_ring_test_ring(ring);
2810 ring->ready = false;
2812 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2813 ring = &adev->gfx.compute_ring[i];
2816 r = amdgpu_ring_test_ring(ring);
2818 ring->ready = false;
2821 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2826 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2828 gfx_v9_0_cp_gfx_enable(adev, enable);
2829 gfx_v9_0_cp_compute_enable(adev, enable);
2832 static int gfx_v9_0_hw_init(void *handle)
2835 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2837 gfx_v9_0_init_golden_registers(adev);
2839 gfx_v9_0_gpu_init(adev);
2841 r = gfx_v9_0_rlc_resume(adev);
2845 r = gfx_v9_0_cp_resume(adev);
2849 r = gfx_v9_0_ngg_en(adev);
2856 static int gfx_v9_0_hw_fini(void *handle)
2858 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2860 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2861 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2862 if (amdgpu_sriov_vf(adev)) {
2863 gfx_v9_0_cp_gfx_enable(adev, false);
2864 /* must disable polling for SRIOV when hw finished, otherwise
2865 * CPC engine may still keep fetching WB address which is already
2866 * invalid after sw finished and trigger DMAR reading error in
2867 * hypervisor side. */
2869 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2872 gfx_v9_0_cp_enable(adev, false);
2873 gfx_v9_0_rlc_stop(adev);
2878 static int gfx_v9_0_suspend(void *handle)
2880 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2882 adev->gfx.in_suspend = true;
2883 return gfx_v9_0_hw_fini(adev);
2886 static int gfx_v9_0_resume(void *handle)
2888 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2891 r = gfx_v9_0_hw_init(adev);
2892 adev->gfx.in_suspend = false;
2896 static bool gfx_v9_0_is_idle(void *handle)
2898 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2900 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
2901 GRBM_STATUS, GUI_ACTIVE))
2907 static int gfx_v9_0_wait_for_idle(void *handle)
2911 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2913 for (i = 0; i < adev->usec_timeout; i++) {
2914 /* read GRBM_STATUS */
2915 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
2916 GRBM_STATUS__GUI_ACTIVE_MASK;
2918 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
2925 static int gfx_v9_0_soft_reset(void *handle)
2927 u32 grbm_soft_reset = 0;
2929 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2932 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
2933 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2934 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2935 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2936 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2937 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2938 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2939 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2940 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2941 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2942 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2945 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2946 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2947 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2951 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
2952 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2953 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2954 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2957 if (grbm_soft_reset) {
2959 gfx_v9_0_rlc_stop(adev);
2961 /* Disable GFX parsing/prefetching */
2962 gfx_v9_0_cp_gfx_enable(adev, false);
2964 /* Disable MEC parsing/prefetching */
2965 gfx_v9_0_cp_compute_enable(adev, false);
2967 if (grbm_soft_reset) {
2968 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2969 tmp |= grbm_soft_reset;
2970 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2971 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2972 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2976 tmp &= ~grbm_soft_reset;
2977 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2978 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2981 /* Wait a little for things to settle down */
2987 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2991 mutex_lock(&adev->gfx.gpu_clock_mutex);
2992 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
2993 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
2994 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2995 mutex_unlock(&adev->gfx.gpu_clock_mutex);
2999 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3001 uint32_t gds_base, uint32_t gds_size,
3002 uint32_t gws_base, uint32_t gws_size,
3003 uint32_t oa_base, uint32_t oa_size)
3005 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3006 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3008 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3009 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3011 oa_base = oa_base >> AMDGPU_OA_SHIFT;
3012 oa_size = oa_size >> AMDGPU_OA_SHIFT;
3015 gfx_v9_0_write_data_to_reg(ring, 0, false,
3016 amdgpu_gds_reg_offset[vmid].mem_base,
3020 gfx_v9_0_write_data_to_reg(ring, 0, false,
3021 amdgpu_gds_reg_offset[vmid].mem_size,
3025 gfx_v9_0_write_data_to_reg(ring, 0, false,
3026 amdgpu_gds_reg_offset[vmid].gws,
3027 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3030 gfx_v9_0_write_data_to_reg(ring, 0, false,
3031 amdgpu_gds_reg_offset[vmid].oa,
3032 (1 << (oa_size + oa_base)) - (1 << oa_base));
3035 static int gfx_v9_0_early_init(void *handle)
3037 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3039 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3040 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3041 gfx_v9_0_set_ring_funcs(adev);
3042 gfx_v9_0_set_irq_funcs(adev);
3043 gfx_v9_0_set_gds_init(adev);
3044 gfx_v9_0_set_rlc_funcs(adev);
3049 static int gfx_v9_0_late_init(void *handle)
3051 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3054 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3058 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
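/* Ask the RLC to enter safe mode (required around clock-gating
 * reprogramming) and wait for it to acknowledge the command. */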
3065 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3067 uint32_t rlc_setting, data;
3070 if (adev->gfx.rlc.in_safe_mode)
3073 /* if RLC is not enabled, do nothing */
3074 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3075 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3078 if (adev->cg_flags &
3079 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3080 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3081 data = RLC_SAFE_MODE__CMD_MASK;
3082 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3083 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3085 /* wait for RLC_SAFE_MODE */
3086 for (i = 0; i < adev->usec_timeout; i++) {
3087 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3091 adev->gfx.rlc.in_safe_mode = true;
3095 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3097 uint32_t rlc_setting, data;
3099 if (!adev->gfx.rlc.in_safe_mode)
3102 /* if RLC is not enabled, do nothing */
3103 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3104 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3107 if (adev->cg_flags &
3108 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3109 /*
3110 * Try to exit safe mode only if it is already in safe
3111 * mode.
3112 */
3113 data = RLC_SAFE_MODE__CMD_MASK;
3114 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3115 adev->gfx.rlc.in_safe_mode = false;
3119 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3122 /* TODO: double check if we need to perform under safe mode */
3123 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3125 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3126 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3127 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3128 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3130 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3131 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3134 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3137 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3140 /* TODO: double check if we need to perform under safe mode */
3141 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3143 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3144 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3146 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3148 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3149 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3151 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3153 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3156 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3161 /* It is disabled by HW by default */
3162 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3163 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3164 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3165 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3166 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3167 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3168 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3170 /* only for Vega10 & Raven1 */
3171 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3174 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3176 /* MGLS is a global flag to control all MGLS in GFX */
3177 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3178 /* 2 - RLC memory Light sleep */
3179 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3180 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3181 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3183 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3185 /* 3 - CP memory Light sleep */
3186 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3187 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3188 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3190 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3194 /* 1 - MGCG_OVERRIDE */
3195 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3196 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3197 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3198 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3199 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3200 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3202 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3204 /* 2 - disable MGLS in RLC */
3205 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3206 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3207 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3208 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3211 /* 3 - disable MGLS in CP */
3212 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3213 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3214 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3215 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3220 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3225 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3227 /* Enable 3D CGCG/CGLS */
3228 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3229 /* write cmd to clear cgcg/cgls ov */
3230 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3231 /* unset CGCG override */
3232 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3233 /* update CGCG and CGLS override bits */
3235 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3236 /* enable 3Dcgcg FSM(0x0020003f) */
3237 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3238 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3239 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3240 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3241 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3242 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3244 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3246 /* set IDLE_POLL_COUNT(0x00900100) */
3247 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3248 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3249 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3251 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3253 /* Disable CGCG/CGLS */
3254 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3255 /* disable cgcg, cgls should be disabled */
3256 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3257 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3258 /* disable cgcg and cgls in FSM */
3260 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3263 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3266 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3271 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3273 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3274 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3275 /* unset CGCG override */
3276 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3277 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3278 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3280 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3281 /* update CGCG and CGLS override bits */
3283 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3285 /* enable cgcg FSM(0x0020003F) */
3286 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3287 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3288 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3289 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3290 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3291 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3293 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3295 /* set IDLE_POLL_COUNT(0x00900100) */
3296 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3297 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3298 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3300 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3302 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3303 /* reset CGCG/CGLS bits */
3304 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3305 /* disable cgcg and cgls in FSM */
3307 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3310 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3313 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3317 /* CGCG/CGLS should be enabled after MGCG/MGLS
3318 * === MGCG + MGLS ===
3320 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3321 /* === CGCG /CGLS for GFX 3D Only === */
3322 gfx_v9_0_update_3d_clock_gating(adev, enable);
3323 /* === CGCG + CGLS === */
3324 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3326 /* CGCG/CGLS should be disabled before MGCG/MGLS
3327 * === CGCG + CGLS ===
3329 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3330 /* === CGCG /CGLS for GFX 3D Only === */
3331 gfx_v9_0_update_3d_clock_gating(adev, enable);
3332 /* === MGCG + MGLS === */
3333 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3338 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3339 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3340 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3343 static int gfx_v9_0_set_powergating_state(void *handle,
3344 enum amd_powergating_state state)
3346 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3347 bool enable = (state == AMD_PG_STATE_GATE);
3349 switch (adev->asic_type) {
3351 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3352 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3353 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3355 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3356 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3359 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3360 gfx_v9_0_enable_cp_power_gating(adev, true);
3362 gfx_v9_0_enable_cp_power_gating(adev, false);
3364 /* update gfx cgpg state */
3365 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3367 /* update mgcg state */
3368 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3377 static int gfx_v9_0_set_clockgating_state(void *handle,
3378 enum amd_clockgating_state state)
3380 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3382 if (amdgpu_sriov_vf(adev))
3385 switch (adev->asic_type) {
3388 gfx_v9_0_update_gfx_clock_gating(adev,
3389 state == AMD_CG_STATE_GATE);
3397 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3399 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3402 if (amdgpu_sriov_vf(adev))
3405 /* AMD_CG_SUPPORT_GFX_MGCG */
3406 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3407 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3408 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3410 /* AMD_CG_SUPPORT_GFX_CGCG */
3411 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3412 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3413 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3415 /* AMD_CG_SUPPORT_GFX_CGLS */
3416 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3417 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3419 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3420 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3421 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3422 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3424 /* AMD_CG_SUPPORT_GFX_CP_LS */
3425 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3426 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3427 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3429 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3430 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3431 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3432 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3434 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3435 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3436 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3439 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3441 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
3444 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3446 struct amdgpu_device *adev = ring->adev;
3449 /* XXX check if swapping is necessary on BE */
3450 if (ring->use_doorbell) {
3451 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3453 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3454 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3460 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3462 struct amdgpu_device *adev = ring->adev;
3464 if (ring->use_doorbell) {
3465 /* XXX check if swapping is necessary on BE */
3466 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3467 WDOORBELL64(ring->doorbell_index, ring->wptr);
3469 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3470 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3474 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3476 u32 ref_and_mask, reg_mem_engine;
3477 struct nbio_hdp_flush_reg *nbio_hf_reg;
3479 if (ring->adev->asic_type == CHIP_VEGA10)
3480 nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
3482 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3485 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3488 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3495 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3496 reg_mem_engine = 1; /* pfp */
3499 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3500 nbio_hf_reg->hdp_flush_req_offset,
3501 nbio_hf_reg->hdp_flush_done_offset,
3502 ref_and_mask, ref_and_mask, 0x20);
3505 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3507 gfx_v9_0_write_data_to_reg(ring, 0, true,
3508 SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
3511 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3512 struct amdgpu_ib *ib,
3513 unsigned vm_id, bool ctx_switch)
3515 u32 header, control = 0;
3517 if (ib->flags & AMDGPU_IB_FLAG_CE)
3518 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3520 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3522 control |= ib->length_dw | (vm_id << 24);
3524 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3525 control |= INDIRECT_BUFFER_PRE_ENB(1);
3527 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3528 gfx_v9_0_ring_emit_de_meta(ring);
3531 amdgpu_ring_write(ring, header);
3532 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3533 amdgpu_ring_write(ring,
3537 lower_32_bits(ib->gpu_addr));
3538 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3539 amdgpu_ring_write(ring, control);
3542 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3543 struct amdgpu_ib *ib,
3544 unsigned vm_id, bool ctx_switch)
3546 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3548 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3549 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3550 amdgpu_ring_write(ring,
3554 lower_32_bits(ib->gpu_addr));
3555 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3556 amdgpu_ring_write(ring, control);
3559 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3560 u64 seq, unsigned flags)
3562 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3563 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3565 /* RELEASE_MEM - flush caches, send int */
3566 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3567 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3569 EOP_TC_WB_ACTION_EN |
3570 EOP_TC_MD_ACTION_EN |
3571 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3573 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3576 * the address should be Qword aligned if 64bit write, Dword
3577 * aligned if only sending 32bit data low (discard data high)
3583 amdgpu_ring_write(ring, lower_32_bits(addr));
3584 amdgpu_ring_write(ring, upper_32_bits(addr));
3585 amdgpu_ring_write(ring, lower_32_bits(seq));
3586 amdgpu_ring_write(ring, upper_32_bits(seq));
3587 amdgpu_ring_write(ring, 0);
3590 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3592 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3593 uint32_t seq = ring->fence_drv.sync_seq;
3594 uint64_t addr = ring->fence_drv.gpu_addr;
3596 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3597 lower_32_bits(addr), upper_32_bits(addr),
3598 seq, 0xffffffff, 4);
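/* Flush the VM hub TLB for vm_id: write the new page-directory address,
 * trigger the invalidate request and wait for the ack bit. */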
3601 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3602 unsigned vm_id, uint64_t pd_addr)
3604 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
3605 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3606 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3607 unsigned eng = ring->vm_inv_eng;
3609 pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
3610 pd_addr |= AMDGPU_PTE_VALID;
3612 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3613 hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
3614 lower_32_bits(pd_addr));
3616 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3617 hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
3618 upper_32_bits(pd_addr));
3620 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3621 hub->vm_inv_eng0_req + eng, req);
3623 /* wait for the invalidate to complete */
3624 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
3625 eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
3627 /* compute doesn't have PFP */
3629 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3630 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3631 amdgpu_ring_write(ring, 0x0);
3635 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
3637 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
3640 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
3644 /* XXX check if swapping is necessary on BE */
3645 if (ring->use_doorbell)
3646 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
3652 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
3654 struct amdgpu_device *adev = ring->adev;
3656 /* XXX check if swapping is necessary on BE */
3657 if (ring->use_doorbell) {
3658 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3659 WDOORBELL64(ring->doorbell_index, ring->wptr);
3661 BUG(); /* only DOORBELL method supported on gfx9 now */
3665 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3666 u64 seq, unsigned int flags)
3668 /* we only allocate 32bit for each seq wb address */
3669 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3671 /* write fence seq to the "addr" */
3672 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3673 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3674 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3675 amdgpu_ring_write(ring, lower_32_bits(addr));
3676 amdgpu_ring_write(ring, upper_32_bits(addr));
3677 amdgpu_ring_write(ring, lower_32_bits(seq));
3679 if (flags & AMDGPU_FENCE_FLAG_INT) {
3680 /* set register to trigger INT */
3681 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3682 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3683 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3684 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
3685 amdgpu_ring_write(ring, 0);
3686 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3690 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
3692 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3693 amdgpu_ring_write(ring, 0);
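/* Write the CE metadata payload into the per-context save area (CSA);
 * emitted under SR-IOV so CE state can be restored, e.g. on preemption. */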
3696 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
3698 static struct v9_ce_ib_state ce_payload = {0};
3702 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
3703 csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
3705 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3706 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3707 WRITE_DATA_DST_SEL(8) |
3709 WRITE_DATA_CACHE_POLICY(0));
3710 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3711 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3712 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	static struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
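/*
 * CONTEXT_CONTROL: dw2 tells the CP which state blocks to (re)load. With
 * only the load_enable bit set the packet is effectively a NOP; the masks
 * OR'd in below select global config, uconfig, CS/GFX SH registers,
 * per-context state and, when a preamble IB is present, CE RAM.
 */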
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time a preamble
		 * is presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
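/*
 * Conditional execution, used for preemptible frames. init_cond_exec emits
 *
 *     COND_EXEC | addr_lo | addr_hi | 0 | <dw_count = 0x55aa55aa>
 *
 * and returns the ring offset of the placeholder. Once the whole frame has
 * been emitted, patch_cond_exec overwrites the placeholder with the real
 * number of DWs the CP must skip when *cond_exe_gpu_addr reads as zero,
 * taking a possible ring-buffer wrap between the two points into account.
 */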
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}
static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_start or frame_end */
}
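/*
 * Register access helpers used by the KIQ on behalf of a VF: emit_rreg
 * copies a register into the writeback slot at adev->virt.reg_val_offs
 * with COPY_DATA (register -> memory, write confirm), from which the
 * caller then reads the value back; emit_wreg writes a register directly
 * via WRITE_DATA with no address increment.
 */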
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}
static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		/* the PIPE0 field layout is shared by all per-pipe INT_CNTL regs */
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
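/*
 * EOP interrupt dispatch. The IV ring_id encodes the source queue as
 * pipe in bits [1:0], ME in bits [3:2] and queue in bits [6:4]; ME 0 is
 * the gfx ring, ME 1/2 are the compute MECs. Since the EOP interrupt is
 * only maskable per pipe, the handler matches the decoded triple against
 * every compute ring before signalling its fences.
 */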
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupts are supported for MEC starting
			 * from VI, but they can only be enabled/disabled per
			 * pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
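/*
 * Privileged register/instruction faults indicate a malformed or
 * malicious command stream; there is no way to recover the ring, so both
 * handlers below log the fault and schedule the device reset worker.
 */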
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};
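/*
 * emit_frame_size in the ring tables below is the worst-case number of
 * DWs a single frame can add to the ring outside of its IBs; the ring
 * allocator reserves space based on it, so it must cover every packet
 * the emit_* callbacks can produce.
 */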
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		24 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 +  /* double SWITCH_BUFFER,
		      * the first COND_EXEC jumps to the place just
		      * prior to this double SWITCH_BUFFER
		      */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 +  /* gfx_v9_0_ring_emit_hdp_flush */
		5 +  /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 +  /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 +  /* gfx_v9_0_ring_emit_hdp_flush */
		5 +  /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 +  /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}
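/*
 * GDS sizing: the total on-chip GDS memory is read back from
 * GDS_VMID0_SIZE at init. A 64 KB GDS is carved into 4 KB gfx/CS
 * partitions (16 partitions); any other size falls back to 1 KB
 * partitions with correspondingly larger GWS shares.
 */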
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}
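/*
 * CU bitmap helpers. A CU counts as active when it is disabled neither by
 * the harvest fuses (CC_GC_SHADER_ARRAY_CONFIG) nor by a user override
 * (GC_USER_SHADER_ARRAY_CONFIG); both registers carry INACTIVE bits for
 * the currently selected SE/SH, so the active mask is the inverted union
 * of the two, clipped to max_cu_per_sh.
 */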
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;

	return 0;
}
const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};