/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

/*(DEBLOBBED)*/

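/*
 * Per-VMID GDS register offsets, one {base, size, GWS, OA} entry for each
 * of the 16 VMIDs.  GDS = Global Data Share, GWS = Global Wave Sync,
 * OA = Ordered Append.
 */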
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
        {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
                SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};

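/*
 * "Golden" register settings, stored as {register offset, AND mask, OR
 * value} triples and applied on top of the hardware defaults at init time.
 */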
static const u32 golden_settings_gc_9_0[] =
{
        SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
        SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
        SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
        SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
        SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
        SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
        SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
        SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
        SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
        SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
        SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};

static const u32 golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
        SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
        SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
        SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
        SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
        SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};

static const u32 golden_settings_gc_9_1[] =
{
        SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
        SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
        SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
        SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
        SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
        SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
        SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
        SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
        SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
        SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};

static const u32 golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
        SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
        SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
        SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
        SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
        SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

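/* Apply the golden register fix-ups for the detected ASIC. */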
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_gc_9_0,
                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_gc_9_0_vg10,
                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_RAVEN:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_gc_9_1,
                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_gc_9_1_rv1,
                                                 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        default:
                break;
        }
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

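/*
 * Emit a PM4 WRITE_DATA packet that writes @val to register @reg from the
 * given engine; @wc requests a write confirmation before the packet retires.
 */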
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

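/*
 * Emit a PM4 WAIT_REG_MEM packet: stall until the register (or memory dword,
 * when @mem_space is set) masked with @mask equals @ref, with @inv as the
 * poll interval.
 */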
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                                 /* memory (1) or register (0) */
                                 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                                 WAIT_REG_MEM_OPERATION(opt) | /* wait */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                                 WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

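/*
 * Basic ring sanity test: write 0xDEADBEEF to a scratch register through a
 * SET_UCONFIG_REG packet and poll until the value lands or we time out.
 */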
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
                return r;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
                          ring->idx, scratch, tmp);
                r = -EINVAL;
        }
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

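/*
 * Indirect buffer (IB) test: run the same scratch-register write as the
 * ring test, but submitted through an IB, and wait on its fence.
 */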
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
        long r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
        ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
        tmp = RREG32(scratch);
        if (tmp == 0xDEADBEEF) {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

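/*
 * Fetch and validate the PFP, ME, CE, RLC and MEC microcode images, parse
 * their headers, and register them with the PSP loader when that load path
 * is in use.  (The firmware names are elided in this linux-libre tree.)
 */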
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        unsigned int *tmp = NULL;
        unsigned int i = 0;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_RAVEN:
                chip_name = "raven";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
        adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
        adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
        adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
        adev->gfx.rlc.save_and_restore_offset =
                        le32_to_cpu(rlc_hdr->save_and_restore_offset);
        adev->gfx.rlc.clear_state_descriptor_offset =
                        le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
        adev->gfx.rlc.avail_scratch_ram_locations =
                        le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
        adev->gfx.rlc.reg_restore_list_size =
                        le32_to_cpu(rlc_hdr->reg_restore_list_size);
        adev->gfx.rlc.reg_list_format_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_start);
        adev->gfx.rlc.reg_list_format_separate_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
        adev->gfx.rlc.starting_offsets_start =
                        le32_to_cpu(rlc_hdr->starting_offsets_start);
        adev->gfx.rlc.reg_list_format_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
        adev->gfx.rlc.reg_list_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_size_bytes);
        adev->gfx.rlc.register_list_format =
                        kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
                                adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
        if (!adev->gfx.rlc.register_list_format) {
                err = -ENOMEM;
                goto out;
        }

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
                adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

        adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
                adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
        err = reject_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
        if (!err) {
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                                adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                                le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                                le32_to_cpu(cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
                info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
                info->fw = adev->gfx.pfp_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
                info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
                info->fw = adev->gfx.me_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
                info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
                info->fw = adev->gfx.ce_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
                info->fw = adev->gfx.mec_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                if (adev->gfx.mec2_fw) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
                        info->fw = adev->gfx.mec2_fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
                        info->fw = adev->gfx.mec2_fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                }
        }

out:
        if (err) {
                dev_err(adev->dev,
                        "gfx9: Failed to load firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->gfx.pfp_fw);
                adev->gfx.pfp_fw = NULL;
                release_firmware(adev->gfx.me_fw);
                adev->gfx.me_fw = NULL;
                release_firmware(adev->gfx.ce_fw);
                adev->gfx.ce_fw = NULL;
                release_firmware(adev->gfx.rlc_fw);
                adev->gfx.rlc_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
                adev->gfx.mec2_fw = NULL;
        }
        return err;
}

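/*
 * Compute the size, in dwords, of the clear-state buffer (CSB) built from
 * gfx9_cs_data: preamble and context-control packets, one SET_CONTEXT_REG
 * packet per extent, then the end-of-clear-state and CLEAR_STATE packets.
 */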
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
                                    volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index -
                                                PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}

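/*
 * Program the RLC load-balancing (LBPW) thresholds and counters, using
 * broadcast GRBM indexing so the settings reach all shader engines.
 */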
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
        WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

        /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
         * but used for RLC_LB_CNTL configuration */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

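/*
 * Copy the CP jump tables (CE, PFP, ME, MEC and MEC2, in that order) out
 * of the firmware images into the RLC's cp_table buffer.
 */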
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
        const __le32 *fw_data;
        volatile u32 *dst_ptr;
        int me, i, max_me = 5;
        u32 bo_offset = 0;
        u32 table_offset, table_size;

        /* write the cp table buffer */
        dst_ptr = adev->gfx.rlc.cp_table_ptr;
        for (me = 0; me < max_me; me++) {
                if (me == 0) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.ce_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 1) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.pfp_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 2) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.me_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 3) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 4) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec2_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                }

                for (i = 0; i < table_size; i++) {
                        dst_ptr[bo_offset + i] =
                                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
                }

                bo_offset += table_size;
        }
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
        /* clear state block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                        &adev->gfx.rlc.clear_state_gpu_addr,
                        (void **)&adev->gfx.rlc.cs_ptr);

        /* jump table block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                        &adev->gfx.rlc.cp_table_gpu_addr,
                        (void **)&adev->gfx.rlc.cp_table_ptr);
}

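/*
 * Allocate the RLC clear-state buffer and, on Raven, the CP jump table,
 * then fill both and program the LBPW settings.
 */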
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
        volatile u32 *dst_ptr;
        u32 dws;
        const struct cs_section_def *cs_data;
        int r;

        adev->gfx.rlc.cs_data = gfx9_cs_data;

        cs_data = adev->gfx.rlc.cs_data;

        if (cs_data) {
                /* clear state block */
                adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
                r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.clear_state_obj,
                                              &adev->gfx.rlc.clear_state_gpu_addr,
                                              (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
                                r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }
                /* set up the cs buffer */
                dst_ptr = adev->gfx.rlc.cs_ptr;
                gfx_v9_0_get_csb_buffer(adev, dst_ptr);
                amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }

        if (adev->asic_type == CHIP_RAVEN) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
                r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
                                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.cp_table_obj,
                                              &adev->gfx.rlc.cp_table_gpu_addr,
                                              (void **)&adev->gfx.rlc.cp_table_ptr);
                if (r) {
                        dev_err(adev->dev,
                                "(%d) failed to create cp table bo\n", r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }

                rv_init_cp_jump_table(adev);
                amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

                gfx_v9_0_init_lbpw(adev);
        }

        return 0;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

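/*
 * Allocate the MEC's HPD EOP buffer (GFX9_MEC_HPD_SIZE bytes per acquired
 * compute ring) and a GTT copy of the MEC firmware image.
 */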
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
        int r;
        u32 *hpd;
        const __le32 *fw_data;
        unsigned fw_size;
        u32 *fw;
        size_t mec_hpd_size;

        const struct gfx_firmware_header_v1_0 *mec_hdr;

        bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* take ownership of the relevant compute queues */
        amdgpu_gfx_compute_queue_acquire(adev);
        mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
        if (r) {
                dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

        amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

        r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.mec_fw_obj,
                                      &adev->gfx.mec.mec_fw_gpu_addr,
                                      (void **)&fw);
        if (r) {
                dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memcpy(fw, fw_data, fw_size);

        amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

        return 0;
}

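/*
 * Read wave state through the SQ indirect register interface; the *_regs
 * variant auto-increments the index to dump a range of registers.
 */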
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (address << SQ_IND_INDEX__INDEX__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK));
        return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
                           uint32_t wave, uint32_t thread,
                           uint32_t regno, uint32_t num, uint32_t *out)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (regno << SQ_IND_INDEX__INDEX__SHIFT) |
                (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK) |
                (SQ_IND_INDEX__AUTO_INCR_MASK));
        while (num--)
                *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
        /* type 1 wave data */
        dst[(*no_fields)++] = 1;
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
                                     uint32_t wave, uint32_t start,
                                     uint32_t size, uint32_t *dst)
{
        wave_read_regs(
                adev, simd, wave, 0,
                start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v9_0_select_se_sh,
        .read_wave_data = &gfx_v9_0_read_wave_data,
        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};

static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
        u32 gb_addr_config;

        adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_RAVEN:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        default:
                BUG();
                break;
        }

        adev->gfx.config.gb_addr_config = gb_addr_config;

        adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_PIPES);

        adev->gfx.config.max_tile_pipes =
                adev->gfx.config.gb_addr_config_fields.num_pipes;

        adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_BANKS);
        adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        MAX_COMPRESSED_FRAGS);
        adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_RB_PER_SE);
        adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_SHADER_ENGINES);
        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        PIPE_INTERLEAVE_SIZE));
}

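/*
 * Allocate one NGG (next-generation geometry) buffer, sized per shader
 * engine; a zero @size_se falls back to @default_size_se.
 */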
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
                                   struct amdgpu_ngg_buf *ngg_buf,
                                   int size_se,
                                   int default_size_se)
{
        int r;

        if (size_se < 0) {
                dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
                return -EINVAL;
        }
        size_se = size_se ? size_se : default_size_se;

        ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
        r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                    &ngg_buf->bo,
                                    &ngg_buf->gpu_addr,
                                    NULL);
        if (r) {
                dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
                return r;
        }
        ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

        return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < NGG_BUF_MAX; i++)
                amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
                                      &adev->gfx.ngg.buf[i].gpu_addr,
                                      NULL);

        memset(&adev->gfx.ngg.buf[0], 0,
                        sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

        adev->gfx.ngg.init = false;

        return 0;
}

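/*
 * Reserve a slice of GDS for NGG and create the primitive, position,
 * control-sideband and (optionally) parameter-cache buffers.
 */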
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_ngg || adev->gfx.ngg.init)
                return 0;

        /* GDS reserve memory: 64 bytes alignment */
        adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
        adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
        adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
        adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
        adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

        /* Primitive Buffer */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
                                    amdgpu_prim_buf_per_se,
                                    64 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Primitive Buffer\n");
                goto err;
        }

        /* Position Buffer */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
                                    amdgpu_pos_buf_per_se,
                                    256 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Position Buffer\n");
                goto err;
        }

        /* Control Sideband */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
                                    amdgpu_cntl_sb_buf_per_se,
                                    256);
        if (r) {
                dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
                goto err;
        }

        /* Parameter Cache, not created by default */
        if (amdgpu_param_buf_per_se <= 0)
                goto out;

        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
                                    amdgpu_param_buf_per_se,
                                    512 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Parameter Cache\n");
                goto err;
        }

out:
        adev->gfx.ngg.init = true;
        return 0;
err:
        gfx_v9_0_ngg_fini(adev);
        return r;
}

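/*
 * Enable NGG: program the WD buffer sizes and base addresses, then clear
 * the reserved GDS region through a DMA_DATA packet on the gfx ring.
 */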
1107 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1108 {
1109         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1110         int r;
1111         u32 data;
1112         u32 size;
1113         u32 base;
1114
1115         if (!amdgpu_ngg)
1116                 return 0;
1117
1118         /* Program buffer size */
1119         data = 0;
1120         size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
1121         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
1122
1123         size = adev->gfx.ngg.buf[NGG_POS].size / 256;
1124         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
1125
1126         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1127
1128         data = 0;
1129         size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
1130         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
1131
1132         size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
1133         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
1134
1135         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1136
1137         /* Program buffer base address */
1138         base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1139         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1140         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1141
1142         base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1143         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1144         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1145
1146         base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1147         data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1148         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1149
1150         base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1151         data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1152         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1153
1154         base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1155         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1156         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1157
1158         base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1159         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1160         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1161
1162         /* Clear GDS reserved memory */
1163         r = amdgpu_ring_alloc(ring, 17);
1164         if (r) {
1165                 DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1166                           ring->idx, r);
1167                 return r;
1168         }
1169
1170         gfx_v9_0_write_data_to_reg(ring, 0, false,
1171                                    amdgpu_gds_reg_offset[0].mem_size,
1172                                    (adev->gds.mem.total_size +
1173                                     adev->gfx.ngg.gds_reserve_size) >>
1174                                    AMDGPU_GDS_SHIFT);
1175
1176         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1177         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1178                                 PACKET3_DMA_DATA_SRC_SEL(2)));
1179         amdgpu_ring_write(ring, 0);
1180         amdgpu_ring_write(ring, 0);
1181         amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1182         amdgpu_ring_write(ring, 0);
1183         amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);
1184
1186         gfx_v9_0_write_data_to_reg(ring, 0, false,
1187                                    amdgpu_gds_reg_offset[0].mem_size, 0);
1188
1189         amdgpu_ring_commit(ring);
1190
1191         return 0;
1192 }
1193
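/*
 * Set up one compute ring: map the (mec, pipe, queue) triple onto the
 * ring, assign its doorbell and its EOP slot inside the MEC HPD
 * allocation, and attach it to the matching per-pipe EOP interrupt.
 */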
1194 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1195                                       int mec, int pipe, int queue)
1196 {
1197         int r;
1198         unsigned irq_type;
1199         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1202
1203         /* mec0 is me1 */
1204         ring->me = mec + 1;
1205         ring->pipe = pipe;
1206         ring->queue = queue;
1207
1208         ring->ring_obj = NULL;
1209         ring->use_doorbell = true;
1210         ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
1211         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1212                                 + (ring_id * GFX9_MEC_HPD_SIZE);
1213         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1214
1215         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1216                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1217                 + ring->pipe;
1218
1219         /* type-2 packets are deprecated on MEC, use type-3 instead */
1220         r = amdgpu_ring_init(adev, ring, 1024,
1221                              &adev->gfx.eop_irq, irq_type);
1222         if (r)
1223                 return r;
1224
1226         return 0;
1227 }
1228
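/*
 * sw_init: register the KIQ/EOP/privileged interrupt sources, load the
 * gfx microcode, allocate the RLC, MEC and KIQ objects, create the gfx
 * and compute rings, and reserve the GDS/GWS/OA partitions for gfx.
 */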
1229 static int gfx_v9_0_sw_init(void *handle)
1230 {
1231         int i, j, k, r, ring_id;
1232         struct amdgpu_ring *ring;
1233         struct amdgpu_kiq *kiq;
1234         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1235
1236         switch (adev->asic_type) {
1237         case CHIP_VEGA10:
1238         case CHIP_RAVEN:
1239                 adev->gfx.mec.num_mec = 2;
1240                 break;
1241         default:
1242                 adev->gfx.mec.num_mec = 1;
1243                 break;
1244         }
1245
1246         adev->gfx.mec.num_pipe_per_mec = 4;
1247         adev->gfx.mec.num_queue_per_pipe = 8;
1248
1249         /* KIQ event */
1250         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
1251         if (r)
1252                 return r;
1253
1254         /* EOP Event */
1255         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
1256         if (r)
1257                 return r;
1258
1259         /* Privileged reg */
1260         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
1261                               &adev->gfx.priv_reg_irq);
1262         if (r)
1263                 return r;
1264
1265         /* Privileged inst */
1266         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
1267                               &adev->gfx.priv_inst_irq);
1268         if (r)
1269                 return r;
1270
1271         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1272
1273         gfx_v9_0_scratch_init(adev);
1274
1275         r = gfx_v9_0_init_microcode(adev);
1276         if (r) {
1277                 DRM_ERROR("Failed to load gfx firmware!\n");
1278                 return r;
1279         }
1280
1281         r = gfx_v9_0_rlc_init(adev);
1282         if (r) {
1283                 DRM_ERROR("Failed to init rlc BOs!\n");
1284                 return r;
1285         }
1286
1287         r = gfx_v9_0_mec_init(adev);
1288         if (r) {
1289                 DRM_ERROR("Failed to init MEC BOs!\n");
1290                 return r;
1291         }
1292
1293         /* set up the gfx ring */
1294         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1295                 ring = &adev->gfx.gfx_ring[i];
1296                 ring->ring_obj = NULL;
1297                 sprintf(ring->name, "gfx");
1298                 ring->use_doorbell = true;
1299                 ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
1300                 r = amdgpu_ring_init(adev, ring, 1024,
1301                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1302                 if (r)
1303                         return r;
1304         }
1305
1306         /* set up the compute queues - allocate horizontally across pipes */
1307         ring_id = 0;
1308         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1309                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1310                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1311                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1312                                         continue;
1313
1314                                 r = gfx_v9_0_compute_ring_init(adev,
1315                                                                ring_id,
1316                                                                i, k, j);
1317                                 if (r)
1318                                         return r;
1319
1320                                 ring_id++;
1321                         }
1322                 }
1323         }
1324
1325         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1326         if (r) {
1327                 DRM_ERROR("Failed to init KIQ BOs!\n");
1328                 return r;
1329         }
1330
1331         kiq = &adev->gfx.kiq;
1332         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1333         if (r)
1334                 return r;
1335
1336         /* create MQD for all compute queues as well as KIQ for the SRIOV case */
1337         r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
1338         if (r)
1339                 return r;
1340
1341         /* reserve GDS, GWS and OA resource for gfx */
1342         r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
1343                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
1344                                     &adev->gds.gds_gfx_bo, NULL, NULL);
1345         if (r)
1346                 return r;
1347
1348         r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
1349                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
1350                                     &adev->gds.gws_gfx_bo, NULL, NULL);
1351         if (r)
1352                 return r;
1353
1354         r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
1355                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
1356                                     &adev->gds.oa_gfx_bo, NULL, NULL);
1357         if (r)
1358                 return r;
1359
1360         adev->gfx.ce_ram_size = 0x8000;
1361
1362         gfx_v9_0_gpu_early_init(adev);
1363
1364         r = gfx_v9_0_ngg_init(adev);
1365         if (r)
1366                 return r;
1367
1368         return 0;
1369 }
1370
1371
1372 static int gfx_v9_0_sw_fini(void *handle)
1373 {
1374         int i;
1375         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1376
1377         amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1378         amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1379         amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1380
1381         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1382                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1383         for (i = 0; i < adev->gfx.num_compute_rings; i++)
1384                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1385
1386         amdgpu_gfx_compute_mqd_sw_fini(adev);
1387         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1388         amdgpu_gfx_kiq_fini(adev);
1389
1390         gfx_v9_0_mec_fini(adev);
1391         gfx_v9_0_ngg_fini(adev);
1392
1393         return 0;
1394 }
1395
1396
1397 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1398 {
1399         /* TODO */
1400 }
1401
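/*
 * Point subsequent per-SE register accesses at a specific shader
 * engine/array/instance via GRBM_GFX_INDEX; 0xffffffff in any argument
 * selects broadcast writes to all units of that level instead.
 */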
1402 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1403 {
1404         u32 data;
1405
1406         if (instance == 0xffffffff)
1407                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1408         else
1409                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1410
1411         if (se_num == 0xffffffff)
1412                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1413         else
1414                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1415
1416         if (sh_num == 0xffffffff)
1417                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1418         else
1419                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1420
1421         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1422 }
1423
1424 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1425 {
1426         u32 data, mask;
1427
1428         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1429         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1430
1431         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1432         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1433
1434         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1435                                          adev->gfx.config.max_sh_per_se);
1436
1437         return (~data) & mask;
1438 }
1439
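/*
 * Walk every SE/SH pair, read its render backend disable fuses and
 * accumulate the chip-wide bitmap and count of active RBs.
 */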
1440 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1441 {
1442         int i, j;
1443         u32 data;
1444         u32 active_rbs = 0;
1445         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1446                                         adev->gfx.config.max_sh_per_se;
1447
1448         mutex_lock(&adev->grbm_idx_mutex);
1449         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1450                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1451                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1452                         data = gfx_v9_0_get_rb_active_bitmap(adev);
1453                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1454                                                rb_bitmap_width_per_sh);
1455                 }
1456         }
1457         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1458         mutex_unlock(&adev->grbm_idx_mutex);
1459
1460         adev->gfx.config.backend_enable_mask = active_rbs;
1461         adev->gfx.config.num_rbs = hweight32(active_rbs);
1462 }
1463
1464 #define DEFAULT_SH_MEM_BASES    (0x6000)
1465 #define FIRST_COMPUTE_VMID      (8)
1466 #define LAST_COMPUTE_VMID       (16)
1467 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1468 {
1469         int i;
1470         uint32_t sh_mem_config;
1471         uint32_t sh_mem_bases;
1472
1473         /*
1474          * Configure apertures:
1475          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1476          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1477          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1478          */
1479         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1480
1481         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1482                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1483                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1484
1485         mutex_lock(&adev->srbm_mutex);
1486         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1487                 soc15_grbm_select(adev, 0, 0, 0, i);
1488                 /* CP and shaders */
1489                 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1490                 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1491         }
1492         soc15_grbm_select(adev, 0, 0, 0, 0);
1493         mutex_unlock(&adev->srbm_mutex);
1494 }
1495
1496 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1497 {
1498         u32 tmp;
1499         int i;
1500
1501         WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1502
1503         gfx_v9_0_tiling_mode_table_init(adev);
1504
1505         if (adev->gfx.num_gfx_rings)
1506                 gfx_v9_0_setup_rb(adev);
1507         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1508
1509         /* XXX SH_MEM regs */
1510         /* where to put LDS, scratch, GPUVM in FSA64 space */
1511         mutex_lock(&adev->srbm_mutex);
1512         for (i = 0; i < 16; i++) {
1513                 soc15_grbm_select(adev, 0, 0, 0, i);
1514                 /* CP and shaders */
1515                 tmp = 0;
1516                 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
1517                                     SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1518                 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1519                 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1520         }
1521         soc15_grbm_select(adev, 0, 0, 0, 0);
1522
1523         mutex_unlock(&adev->srbm_mutex);
1524
1525         gfx_v9_0_init_compute_vmid(adev);
1526 }
1527
1528 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1529 {
1530         u32 i, j, k;
1531         u32 mask;
1532
1533         mutex_lock(&adev->grbm_idx_mutex);
1534         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1535                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1536                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1537                         for (k = 0; k < adev->usec_timeout; k++) {
1538                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1539                                         break;
1540                                 udelay(1);
1541                         }
1542                 }
1543         }
1544         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1545         mutex_unlock(&adev->grbm_idx_mutex);
1546
1547         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1548                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1549                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1550                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1551         for (k = 0; k < adev->usec_timeout; k++) {
1552                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1553                         break;
1554                 udelay(1);
1555         }
1556 }
1557
1558 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1559                                                bool enable)
1560 {
1561         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1562
1563         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1564         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1565         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1566         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
1567
1568         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1569 }
1570
1571 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
1572 {
1573         /* csib */
1574         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
1575                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
1576         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
1577                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1578         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
1579                         adev->gfx.rlc.clear_state_size);
1580 }
1581
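/*
 * Scan the RLC register_list_format blob, whose entries are ranges
 * terminated by 0xFFFFFFFF markers: record where each range starts,
 * deduplicate the indirect register offsets into unique_indirect_regs,
 * and rewrite the blob to reference them by index.
 */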
1582 static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
1583                                 int indirect_offset,
1584                                 int list_size,
1585                                 int *unique_indirect_regs,
1586                                 int *unique_indirect_reg_count,
1587                                 int max_indirect_reg_count,
1588                                 int *indirect_start_offsets,
1589                                 int *indirect_start_offsets_count,
1590                                 int max_indirect_start_offsets_count)
1591 {
1592         int idx;
1593         bool new_entry = true;
1594
1595         for (; indirect_offset < list_size; indirect_offset++) {
1596
1597                 if (new_entry) {
1598                         new_entry = false;
1599                         indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
1600                         *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
1601                         BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
1602                 }
1603
1604                 if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
1605                         new_entry = true;
1606                         continue;
1607                 }
1608
1609                 indirect_offset += 2;
1610
1611                 /* look for the matching index */
1612                 for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
1613                         if (unique_indirect_regs[idx] ==
1614                                 register_list_format[indirect_offset])
1615                                 break;
1616                 }
1617
1618                 if (idx >= *unique_indirect_reg_count) {
1619                         unique_indirect_regs[*unique_indirect_reg_count] =
1620                                 register_list_format[indirect_offset];
1621                         idx = *unique_indirect_reg_count;
1622                         *unique_indirect_reg_count = *unique_indirect_reg_count + 1;
1623                         BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
1624                 }
1625
1626                 register_list_format[indirect_offset] = idx;
1627         }
1628 }
1629
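/*
 * Program the RLC save/restore machine: upload the register restore
 * table to SRM ARAM, the rewritten format list and the range start
 * offsets to RLC scratch, and the deduplicated indirect registers to
 * the SRM index control register pairs.
 */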
1630 static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1631 {
1632         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1633         int unique_indirect_reg_count = 0;
1634
1635         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1636         int indirect_start_offsets_count = 0;
1637
1638         int list_size = 0;
1639         int i = 0;
1640         u32 tmp = 0;
1641
1642         u32 *register_list_format =
1643                 kmemdup(adev->gfx.rlc.register_list_format,
1644                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1645         if (!register_list_format)
1646                 return -ENOMEM;
1648
1649         /* setup unique_indirect_regs array and indirect_start_offsets array */
1650         gfx_v9_0_parse_ind_reg_list(register_list_format,
1651                                 GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
1652                                 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1653                                 unique_indirect_regs,
1654                                 &unique_indirect_reg_count,
1655                                 ARRAY_SIZE(unique_indirect_regs),
1656                                 indirect_start_offsets,
1657                                 &indirect_start_offsets_count,
1658                                 ARRAY_SIZE(indirect_start_offsets));
1659
1660         /* enable auto-increment in case it is disabled */
1661         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1662         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1663         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1664
1665         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1666         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1667                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1668         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1669                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1670                         adev->gfx.rlc.register_restore[i]);
1671
1672         /* load direct register */
1673         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
1674         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1675                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1676                         adev->gfx.rlc.register_restore[i]);
1677
1678         /* load indirect register */
1679         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1680                 adev->gfx.rlc.reg_list_format_start);
1681         for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
1682                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1683                         register_list_format[i]);
1684
1685         /* set save/restore list size */
1686         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1687         list_size = list_size >> 1;
1688         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1689                 adev->gfx.rlc.reg_restore_list_size);
1690         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1691
1692         /* write the starting offsets to RLC scratch ram */
1693         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1694                 adev->gfx.rlc.starting_offsets_start);
1695         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1696                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1697                         indirect_start_offsets[i]);
1698
1699         /* load unique indirect regs */
1700         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1701                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1702                         unique_indirect_regs[i] & 0x3FFFF);
1703                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
1704                         unique_indirect_regs[i] >> 20);
1705         }
1706
1707         kfree(register_list_format);
1708         return 0;
1709 }
1710
1711 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
1712 {
1713         u32 tmp = 0;
1714
1715         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1716         tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1717         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1718 }
1719
1720 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
1721                                              bool enable)
1722 {
1723         uint32_t data = 0;
1724         uint32_t default_data = 0;
1725
1726         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
1727         if (enable) {
1728                 /* enable GFXIP control over CGPG */
1729                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1730                 if (default_data != data)
1731                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1732
1733                 /* update status */
1734                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
1735                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
1736                 if (default_data != data)
1737                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1738         } else {
1739                 /* restore GFXIP control over CGPG */
1740                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1741                 if (default_data != data)
1742                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1743         }
1744 }
1745
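/*
 * One-time GFX power-gating setup: idle poll count, RLC power up/down
 * and SERDES delays, the GRBM register-save idle threshold, and handing
 * CGPG control over to GFXIP.
 */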
1746 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
1747 {
1748         uint32_t data = 0;
1749
1750         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1751                               AMD_PG_SUPPORT_GFX_SMG |
1752                               AMD_PG_SUPPORT_GFX_DMG)) {
1753                 /* init IDLE_POLL_COUNT = 60 */
1754                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
1755                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
1756                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
1757                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
1758
1759                 /* init RLC PG Delay */
1760                 data = 0;
1761                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
1762                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
1763                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
1764                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
1765                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
1766
1767                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
1768                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
1769                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
1770                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
1771
1772                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
1773                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
1774                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
1775                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
1776
1777                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
1778                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
1779
1780                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
1781                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
1782                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
1783
1784                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
1785         }
1786 }
1787
1788 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1789                                                 bool enable)
1790 {
1791         uint32_t data = 0;
1792         uint32_t default_data = 0;
1793
1794         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1795
1796         if (enable) {
1797                 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1798                 if (default_data != data)
1799                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1800         } else {
1801                 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1802                 if (default_data != data)
1803                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1804         }
1805 }
1806
1807 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1808                                                 bool enable)
1809 {
1810         uint32_t data = 0;
1811         uint32_t default_data = 0;
1812
1813         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1814
1815         if (enable) {
1816                 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1817                 if (default_data != data)
1818                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1819         } else {
1820                 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1821                 if (default_data != data)
1822                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1823         }
1824 }
1825
1826 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1827                                         bool enable)
1828 {
1829         uint32_t data = 0;
1830         uint32_t default_data = 0;
1831
1832         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1833
1834         if (enable) {
1835                 data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1836                 if (default_data != data)
1837                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1838         } else {
1839                 data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1840                 if (default_data != data)
1841                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1842         }
1843 }
1844
1845 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1846                                                 bool enable)
1847 {
1848         uint32_t data, default_data;
1849
1850         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1851         if (enable)
1852                 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1853         else
1854                 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1855         if (default_data != data)
1856                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1857 }
1858
1859 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1860                                                 bool enable)
1861 {
1862         uint32_t data, default_data;
1863
1864         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1865         if (enable)
1866                 data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1867         else
1868                 data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1869         if (default_data != data)
1870                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1871
1872         if (!enable)
1873                 /* read any GFX register to wake up GFX */
1874                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
1875 }
1876
1877 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
1878                                                        bool enable)
1879 {
1880         uint32_t data, default_data;
1881
1882         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1883         if (enable)
1884                 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1885         else
1886                 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1887         if (default_data != data)
1888                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1889 }
1890
1891 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
1892                                                 bool enable)
1893 {
1894         uint32_t data, default_data;
1895
1896         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1897         if (enable)
1898                 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1899         else
1900                 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1901         if (default_data != data)
1902                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1903 }
1904
1905 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
1906 {
1907         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1908                               AMD_PG_SUPPORT_GFX_SMG |
1909                               AMD_PG_SUPPORT_GFX_DMG |
1910                               AMD_PG_SUPPORT_CP |
1911                               AMD_PG_SUPPORT_GDS |
1912                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
1913                 gfx_v9_0_init_csb(adev);
1914                 gfx_v9_0_init_rlc_save_restore_list(adev);
1915                 gfx_v9_0_enable_save_restore_machine(adev);
1916
1917                 if (adev->asic_type == CHIP_RAVEN) {
1918                         WREG32(mmRLC_JUMP_TABLE_RESTORE,
1919                                 adev->gfx.rlc.cp_table_gpu_addr >> 8);
1920                         gfx_v9_0_init_gfx_power_gating(adev);
1921
1922                         if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
1923                                 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
1924                                 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
1925                         } else {
1926                                 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
1927                                 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
1928                         }
1929
1930                         if (adev->pg_flags & AMD_PG_SUPPORT_CP)
1931                                 gfx_v9_0_enable_cp_power_gating(adev, true);
1932                         else
1933                                 gfx_v9_0_enable_cp_power_gating(adev, false);
1934                 }
1935         }
1936 }
1937
1938 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
1939 {
1940         u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1941
1942         tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1943         WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1944
1945         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
1946
1947         gfx_v9_0_wait_for_rlc_serdes(adev);
1948 }
1949
1950 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
1951 {
1952         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1953         udelay(50);
1954         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1955         udelay(50);
1956 }
1957
1958 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
1959 {
1960 #ifdef AMDGPU_RLC_DEBUG_RETRY
1961         u32 rlc_ucode_ver;
1962 #endif
1963
1964         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1965         udelay(50);
1966
1967         /* APUs enable the CP interrupt only after the CP is initialized */
1968         if (!(adev->flags & AMD_IS_APU)) {
1969                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
1970                 udelay(50);
1971         }
1972
1973 #ifdef AMDGPU_RLC_DEBUG_RETRY
1974         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
1975         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
1976         if (rlc_ucode_ver == 0x108) {
1977                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1978                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1979                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1980                  * default is 0x9C4 to create a 100us interval */
1981                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
1982                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1983                  * to disable the page fault retry interrupts, default is
1984                  * 0x100 (256) */
1985                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
1986         }
1987 #endif
1988 }
1989
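/*
 * Legacy (non-PSP) RLC microcode load: stream the image one dword at a
 * time through the RLC_GPM_UCODE_ADDR/DATA window.
 */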
1990 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
1991 {
1992         const struct rlc_firmware_header_v2_0 *hdr;
1993         const __le32 *fw_data;
1994         unsigned i, fw_size;
1995
1996         if (!adev->gfx.rlc_fw)
1997                 return -EINVAL;
1998
1999         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2000         amdgpu_ucode_print_rlc_hdr(&hdr->header);
2001
2002         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2003                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2004         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2005
2006         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2007                         RLCG_UCODE_LOADING_START_ADDRESS);
2008         for (i = 0; i < fw_size; i++)
2009                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2010         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2011
2012         return 0;
2013 }
2014
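/*
 * Full RLC bring-up: halt the RLC, disable clock and power gating,
 * soft-reset it, re-init power gating, load the microcode unless the
 * PSP already did, toggle LBPW on Raven per amdgpu_lbpw, then start it.
 */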
2015 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2016 {
2017         int r;
2018
2019         if (amdgpu_sriov_vf(adev))
2020                 return 0;
2021
2022         gfx_v9_0_rlc_stop(adev);
2023
2024         /* disable CG */
2025         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2026
2027         /* disable PG */
2028         WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2029
2030         gfx_v9_0_rlc_reset(adev);
2031
2032         gfx_v9_0_init_pg(adev);
2033
2034         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2035                 /* legacy rlc firmware loading */
2036                 r = gfx_v9_0_rlc_load_microcode(adev);
2037                 if (r)
2038                         return r;
2039         }
2040
2041         if (adev->asic_type == CHIP_RAVEN) {
2042                 if (amdgpu_lbpw != 0)
2043                         gfx_v9_0_enable_lbpw(adev, true);
2044                 else
2045                         gfx_v9_0_enable_lbpw(adev, false);
2046         }
2047
2048         gfx_v9_0_rlc_start(adev);
2049
2050         return 0;
2051 }
2052
2053 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2054 {
2055         int i;
2056         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2057
2058         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2059         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2060         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2061         if (!enable) {
2062                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2063                         adev->gfx.gfx_ring[i].ready = false;
2064         }
2065         WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2066         udelay(50);
2067 }
2068
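/*
 * Legacy gfx CP microcode load: with the CP halted, write the PFP, CE
 * and ME images through their respective UCODE/RAM address-data
 * register windows.
 */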
2069 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2070 {
2071         const struct gfx_firmware_header_v1_0 *pfp_hdr;
2072         const struct gfx_firmware_header_v1_0 *ce_hdr;
2073         const struct gfx_firmware_header_v1_0 *me_hdr;
2074         const __le32 *fw_data;
2075         unsigned i, fw_size;
2076
2077         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2078                 return -EINVAL;
2079
2080         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2081                 adev->gfx.pfp_fw->data;
2082         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2083                 adev->gfx.ce_fw->data;
2084         me_hdr = (const struct gfx_firmware_header_v1_0 *)
2085                 adev->gfx.me_fw->data;
2086
2087         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2088         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2089         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2090
2091         gfx_v9_0_cp_gfx_enable(adev, false);
2092
2093         /* PFP */
2094         fw_data = (const __le32 *)
2095                 (adev->gfx.pfp_fw->data +
2096                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2097         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2098         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2099         for (i = 0; i < fw_size; i++)
2100                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2101         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2102
2103         /* CE */
2104         fw_data = (const __le32 *)
2105                 (adev->gfx.ce_fw->data +
2106                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2107         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2108         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2109         for (i = 0; i < fw_size; i++)
2110                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2111         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2112
2113         /* ME */
2114         fw_data = (const __le32 *)
2115                 (adev->gfx.me_fw->data +
2116                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2117         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2118         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2119         for (i = 0; i < fw_size; i++)
2120                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2121         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2122
2123         return 0;
2124 }
2125
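/*
 * Prime the gfx CP: emit the clear-state context sections from
 * gfx9_cs_data, a CLEAR_STATE packet and the CE partition bases on
 * gfx ring 0.
 */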
2126 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2127 {
2128         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2129         const struct cs_section_def *sect = NULL;
2130         const struct cs_extent_def *ext = NULL;
2131         int r, i, tmp;
2132
2133         /* init the CP */
2134         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2135         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2136
2137         gfx_v9_0_cp_gfx_enable(adev, true);
2138
2139         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2140         if (r) {
2141                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2142                 return r;
2143         }
2144
2145         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2146         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2147
2148         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2149         amdgpu_ring_write(ring, 0x80000000);
2150         amdgpu_ring_write(ring, 0x80000000);
2151
2152         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2153                 for (ext = sect->section; ext->extent != NULL; ++ext) {
2154                         if (sect->id == SECT_CONTEXT) {
2155                                 amdgpu_ring_write(ring,
2156                                        PACKET3(PACKET3_SET_CONTEXT_REG,
2157                                                ext->reg_count));
2158                                 amdgpu_ring_write(ring,
2159                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2160                                 for (i = 0; i < ext->reg_count; i++)
2161                                         amdgpu_ring_write(ring, ext->extent[i]);
2162                         }
2163                 }
2164         }
2165
2166         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2167         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2168
2169         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2170         amdgpu_ring_write(ring, 0);
2171
2172         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2173         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2174         amdgpu_ring_write(ring, 0x8000);
2175         amdgpu_ring_write(ring, 0x8000);
2176
2177         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2178         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2179                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2180         amdgpu_ring_write(ring, tmp);
2181         amdgpu_ring_write(ring, 0);
2182
2183         amdgpu_ring_commit(ring);
2184
2185         return 0;
2186 }
2187
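/*
 * Configure gfx ring 0: ring buffer size and block size, rptr/wptr
 * writeback addresses, ring base, doorbell offset and range, then
 * start the CP on it and mark the ring ready.
 */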
2188 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2189 {
2190         struct amdgpu_ring *ring;
2191         u32 tmp;
2192         u32 rb_bufsz;
2193         u64 rb_addr, rptr_addr, wptr_gpu_addr;
2194
2195         /* Set the write pointer delay */
2196         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2197
2198         /* set the RB to use vmid 0 */
2199         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2200
2201         /* Set ring buffer size */
2202         ring = &adev->gfx.gfx_ring[0];
2203         rb_bufsz = order_base_2(ring->ring_size / 8);
2204         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2205         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2206 #ifdef __BIG_ENDIAN
2207         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2208 #endif
2209         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2210
2211         /* Initialize the ring buffer's write pointers */
2212         ring->wptr = 0;
2213         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2214         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2215
2216         /* set the wb address whether it's enabled or not */
2217         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2218         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2219         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2220
2221         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2222         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2223         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2224
2225         mdelay(1);
2226         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2227
2228         rb_addr = ring->gpu_addr >> 8;
2229         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2230         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2231
2232         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2233         if (ring->use_doorbell) {
2234                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2235                                     DOORBELL_OFFSET, ring->doorbell_index);
2236                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2237                                     DOORBELL_EN, 1);
2238         } else {
2239                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2240         }
2241         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2242
2243         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2244                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
2245         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2246
2247         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2248                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2249
2251         /* start the ring */
2252         gfx_v9_0_cp_gfx_start(adev);
2253         ring->ready = true;
2254
2255         return 0;
2256 }
2257
2258 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2259 {
2260         int i;
2261
2262         if (enable) {
2263                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2264         } else {
2265                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2266                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2267                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2268                         adev->gfx.compute_ring[i].ready = false;
2269                 adev->gfx.kiq.ring.ready = false;
2270         }
2271         udelay(50);
2272 }
2273
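/*
 * Legacy MEC load: point the CPC instruction cache at the MEC firmware
 * BO and write the MEC1 jump table through the UCODE_ADDR/DATA window.
 */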
2274 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2275 {
2276         const struct gfx_firmware_header_v1_0 *mec_hdr;
2277         const __le32 *fw_data;
2278         unsigned i;
2279         u32 tmp;
2280
2281         if (!adev->gfx.mec_fw)
2282                 return -EINVAL;
2283
2284         gfx_v9_0_cp_compute_enable(adev, false);
2285
2286         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2287         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2288
2289         fw_data = (const __le32 *)
2290                 (adev->gfx.mec_fw->data +
2291                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2292         tmp = 0;
2293         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2294         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2295         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2296
2297         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2298                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2299         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2300                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2301
2302         /* MEC1 */
2303         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2304                          mec_hdr->jt_offset);
2305         for (i = 0; i < mec_hdr->jt_size; i++)
2306                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2307                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2308
2309         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2310                         adev->gfx.mec_fw_version);
2311         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1 */
2312
2313         return 0;
2314 }
2315
2316 /* KIQ functions */
2317 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2318 {
2319         uint32_t tmp;
2320         struct amdgpu_device *adev = ring->adev;
2321
2322         /* tell RLC which is KIQ queue */
2323         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2324         tmp &= 0xffffff00;
2325         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2326         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2327         tmp |= 0x80;
2328         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2329 }
2330
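/*
 * Map all enabled compute queues to hardware through the KIQ: one
 * SET_RESOURCES packet publishes the queue mask, then one MAP_QUEUES
 * packet per compute ring; completion is confirmed by polling a
 * scratch register that the KIQ writes back with a magic value.
 */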
2331 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2332 {
2333         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2334         uint32_t scratch, tmp = 0;
2335         uint64_t queue_mask = 0;
2336         int r, i;
2337
2338         for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2339                 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2340                         continue;
2341
2342                 /* This situation may be hit in the future if a new HW
2343                  * generation exposes more than 64 queues. If so, the
2344                  * definition of queue_mask needs updating */
2345                 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2346                         DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2347                         break;
2348                 }
2349
2350                 queue_mask |= (1ull << i);
2351         }
2352
2353         r = amdgpu_gfx_scratch_get(adev, &scratch);
2354         if (r) {
2355                 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2356                 return r;
2357         }
2358         WREG32(scratch, 0xCAFEDEAD);
2359
2360         r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2361         if (r) {
2362                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2363                 amdgpu_gfx_scratch_free(adev, scratch);
2364                 return r;
2365         }
2366
2367         /* set resources */
2368         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2369         amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2370                           PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2371         amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2372         amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2373         amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2374         amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2375         amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2376         amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2377         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2378                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2379                 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2380                 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2381
2382                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2383                 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2384                 amdgpu_ring_write(kiq_ring,
2385                                   PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2386                                   PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2387                                   PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2388                                   PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2389                                   PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2390                                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2391                                   PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
2392                                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2393                                   PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2394                 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2395                 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2396                 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2397                 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2398                 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2399         }
2400         /* write to scratch for completion */
2401         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2402         amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2403         amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2404         amdgpu_ring_commit(kiq_ring);
2405
2406         for (i = 0; i < adev->usec_timeout; i++) {
2407                 tmp = RREG32(scratch);
2408                 if (tmp == 0xDEADBEEF)
2409                         break;
2410                 DRM_UDELAY(1);
2411         }
2412         if (i >= adev->usec_timeout) {
2413                 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2414                           scratch, tmp);
2415                 r = -EINVAL;
2416         }
2417         amdgpu_gfx_scratch_free(adev, scratch);
2418
2419         return r;
2420 }
2421
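/*
 * Fill the v9 MQD from the ring state: EOP buffer, MQD and HQD base
 * addresses, queue size, rptr/wptr writeback addresses and doorbell
 * control, mirroring what the CP_RB0_* programming does for gfx.
 */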
2422 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2423 {
2424         struct amdgpu_device *adev = ring->adev;
2425         struct v9_mqd *mqd = ring->mqd_ptr;
2426         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2427         uint32_t tmp;
2428
2429         mqd->header = 0xC0310800;
2430         mqd->compute_pipelinestat_enable = 0x00000001;
2431         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2432         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2433         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2434         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2435         mqd->compute_misc_reserved = 0x00000003;
2436
2437         eop_base_addr = ring->eop_gpu_addr >> 8;
2438         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2439         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2440
2441         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2442         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2443         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2444                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2445
2446         mqd->cp_hqd_eop_control = tmp;
2447
2448         /* enable doorbell? */
2449         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2450
2451         if (ring->use_doorbell) {
2452                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2453                                     DOORBELL_OFFSET, ring->doorbell_index);
2454                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2455                                     DOORBELL_EN, 1);
2456                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2457                                     DOORBELL_SOURCE, 0);
2458                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2459                                     DOORBELL_HIT, 0);
2460         } else {
2461                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2462                                     DOORBELL_EN, 0);
2463         }
2464
2465         mqd->cp_hqd_pq_doorbell_control = tmp;
2466
2467         /* start with the queue disabled: clear dequeue request and ring pointers */
2468         ring->wptr = 0;
2469         mqd->cp_hqd_dequeue_request = 0;
2470         mqd->cp_hqd_pq_rptr = 0;
2471         mqd->cp_hqd_pq_wptr_lo = 0;
2472         mqd->cp_hqd_pq_wptr_hi = 0;
2473
2474         /* set the pointer to the MQD */
2475         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2476         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2477
2478         /* set MQD vmid to 0 */
2479         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2480         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2481         mqd->cp_mqd_control = tmp;
2482
2483         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2484         hqd_gpu_addr = ring->gpu_addr >> 8;
2485         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2486         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2487
2488         /* set up the HQD, this is similar to CP_RB0_CNTL */
2489         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2490         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2491                             (order_base_2(ring->ring_size / 4) - 1));
2492         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2493                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2494 #ifdef __BIG_ENDIAN
2495         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2496 #endif
2497         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2498         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2499         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2500         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2501         mqd->cp_hqd_pq_control = tmp;
2502
2503         /* set the wb address whether it's enabled or not */
2504         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2505         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2506         mqd->cp_hqd_pq_rptr_report_addr_hi =
2507                 upper_32_bits(wb_gpu_addr) & 0xffff;
2508
2509         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2510         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2511         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2512         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2513
2514         tmp = 0;
2515         /* enable the doorbell if requested */
2516         if (ring->use_doorbell) {
2517                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2518                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2519                                 DOORBELL_OFFSET, ring->doorbell_index);
2520
2521                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2522                                          DOORBELL_EN, 1);
2523                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2524                                          DOORBELL_SOURCE, 0);
2525                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2526                                          DOORBELL_HIT, 0);
2527         }
2528
2529         mqd->cp_hqd_pq_doorbell_control = tmp;
2530
2531         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2532         ring->wptr = 0;
2533         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2534
2535         /* set the vmid for the queue */
2536         mqd->cp_hqd_vmid = 0;
2537
2538         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2539         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2540         mqd->cp_hqd_persistent_state = tmp;
2541
2542         /* set MIN_IB_AVAIL_SIZE */
2543         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2544         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2545         mqd->cp_hqd_ib_control = tmp;
2546
2547         /* activate the queue */
2548         mqd->cp_hqd_active = 1;
2549
2550         return 0;
2551 }
2552
2553 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2554 {
2555         struct amdgpu_device *adev = ring->adev;
2556         struct v9_mqd *mqd = ring->mqd_ptr;
2557         int j;
2558
2559         /* disable wptr polling */
2560         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2561
2562         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2563                mqd->cp_hqd_eop_base_addr_lo);
2564         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2565                mqd->cp_hqd_eop_base_addr_hi);
2566
2567         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2568         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2569                mqd->cp_hqd_eop_control);
2570
2571         /* enable doorbell? */
2572         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2573                mqd->cp_hqd_pq_doorbell_control);
2574
2575         /* disable the queue if it's active */
2576         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2577                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2578                 for (j = 0; j < adev->usec_timeout; j++) {
2579                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2580                                 break;
2581                         udelay(1);
2582                 }
2583                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2584                        mqd->cp_hqd_dequeue_request);
2585                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2586                        mqd->cp_hqd_pq_rptr);
2587                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2588                        mqd->cp_hqd_pq_wptr_lo);
2589                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2590                        mqd->cp_hqd_pq_wptr_hi);
2591         }
2592
2593         /* set the pointer to the MQD */
2594         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2595                mqd->cp_mqd_base_addr_lo);
2596         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2597                mqd->cp_mqd_base_addr_hi);
2598
2599         /* set MQD vmid to 0 */
2600         WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2601                mqd->cp_mqd_control);
2602
2603         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2604         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2605                mqd->cp_hqd_pq_base_lo);
2606         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2607                mqd->cp_hqd_pq_base_hi);
2608
2609         /* set up the HQD, this is similar to CP_RB0_CNTL */
2610         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2611                mqd->cp_hqd_pq_control);
2612
2613         /* set the wb address whether it's enabled or not */
2614         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2615                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
2616         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2617                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
2618
2619         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2620         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2621                mqd->cp_hqd_pq_wptr_poll_addr_lo);
2622         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2623                mqd->cp_hqd_pq_wptr_poll_addr_hi);
2624
2625         /* enable the doorbell if requested */
2626         if (ring->use_doorbell) {
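                     /* AMDGPU_DOORBELL64_* indices refer to 64-bit doorbell
                      * slots: "* 2" converts to a 32-bit slot index and
                      * "<< 2" to the byte offset the MEC range registers
                      * expect (our reading of the encoding; not spelled out
                      * in this file)
                      */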
2627                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2628                                         (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2629                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2630                                         (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2631         }
2632
2633         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2634                mqd->cp_hqd_pq_doorbell_control);
2635
2636         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2637         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2638                mqd->cp_hqd_pq_wptr_lo);
2639         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2640                mqd->cp_hqd_pq_wptr_hi);
2641
2642         /* set the vmid for the queue */
2643         WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2644
2645         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2646                mqd->cp_hqd_persistent_state);
2647
2648         /* activate the queue */
2649         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2650                mqd->cp_hqd_active);
2651
2652         if (ring->use_doorbell)
2653                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2654
2655         return 0;
2656 }
2657
2658 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2659 {
2660         struct amdgpu_device *adev = ring->adev;
2661         struct v9_mqd *mqd = ring->mqd_ptr;
2662         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
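             /* the KIQ uses the MQD backup slot one past the last compute ring */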
2663
2664         gfx_v9_0_kiq_setting(ring);
2665
2666         if (adev->gfx.in_reset) { /* for GPU_RESET case */
2667                 /* reset MQD to a clean status */
2668                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2669                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2670
2671                 /* reset ring buffer */
2672                 ring->wptr = 0;
2673                 amdgpu_ring_clear_ring(ring);
2674
2675                 mutex_lock(&adev->srbm_mutex);
2676                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2677                 gfx_v9_0_kiq_init_register(ring);
2678                 soc15_grbm_select(adev, 0, 0, 0, 0);
2679                 mutex_unlock(&adev->srbm_mutex);
2680         } else {
2681                 memset((void *)mqd, 0, sizeof(*mqd));
2682                 mutex_lock(&adev->srbm_mutex);
2683                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2684                 gfx_v9_0_mqd_init(ring);
2685                 gfx_v9_0_kiq_init_register(ring);
2686                 soc15_grbm_select(adev, 0, 0, 0, 0);
2687                 mutex_unlock(&adev->srbm_mutex);
2688
2689                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2690                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2691         }
2692
2693         return 0;
2694 }
2695
2696 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2697 {
2698         struct amdgpu_device *adev = ring->adev;
2699         struct v9_mqd *mqd = ring->mqd_ptr;
2700         int mqd_idx = ring - &adev->gfx.compute_ring[0];
2701
2702         if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
2703                 memset((void *)mqd, 0, sizeof(*mqd));
2704                 mutex_lock(&adev->srbm_mutex);
2705                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2706                 gfx_v9_0_mqd_init(ring);
2707                 soc15_grbm_select(adev, 0, 0, 0, 0);
2708                 mutex_unlock(&adev->srbm_mutex);
2709
2710                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2711                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2712         } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
2713                 /* reset MQD to a clean status */
2714                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2715                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2716
2717                 /* reset ring buffer */
2718                 ring->wptr = 0;
2719                 amdgpu_ring_clear_ring(ring);
2720         } else {
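                     /* restore path (e.g. resume from suspend): the MQD is
                      * still valid, only the ring buffer contents need
                      * clearing
                      */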
2721                 amdgpu_ring_clear_ring(ring);
2722         }
2723
2724         return 0;
2725 }
2726
2727 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2728 {
2729         struct amdgpu_ring *ring = NULL;
2730         int r = 0, i;
2731
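             /* bring the compute engine up, program the KIQ first, then init
              * each KCQ's MQD; the KIQ itself maps the KCQs at the end via
              * gfx_v9_0_kiq_kcq_enable()
              */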
2732         gfx_v9_0_cp_compute_enable(adev, true);
2733
2734         ring = &adev->gfx.kiq.ring;
2735
2736         r = amdgpu_bo_reserve(ring->mqd_obj, false);
2737         if (unlikely(r != 0))
2738                 goto done;
2739
2740         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2741         if (!r) {
2742                 r = gfx_v9_0_kiq_init_queue(ring);
2743                 amdgpu_bo_kunmap(ring->mqd_obj);
2744                 ring->mqd_ptr = NULL;
2745         }
2746         amdgpu_bo_unreserve(ring->mqd_obj);
2747         if (r)
2748                 goto done;
2749
2750         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2751                 ring = &adev->gfx.compute_ring[i];
2752
2753                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2754                 if (unlikely(r != 0))
2755                         goto done;
2756                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2757                 if (!r) {
2758                         r = gfx_v9_0_kcq_init_queue(ring);
2759                         amdgpu_bo_kunmap(ring->mqd_obj);
2760                         ring->mqd_ptr = NULL;
2761                 }
2762                 amdgpu_bo_unreserve(ring->mqd_obj);
2763                 if (r)
2764                         goto done;
2765         }
2766
2767         r = gfx_v9_0_kiq_kcq_enable(adev);
2768 done:
2769         return r;
2770 }
2771
2772 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2773 {
2774         int r, i;
2775         struct amdgpu_ring *ring;
2776
2777         if (!(adev->flags & AMD_IS_APU))
2778                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2779
2780         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2781                 /* legacy firmware loading */
2782                 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2783                 if (r)
2784                         return r;
2785
2786                 r = gfx_v9_0_cp_compute_load_microcode(adev);
2787                 if (r)
2788                         return r;
2789         }
2790
2791         r = gfx_v9_0_cp_gfx_resume(adev);
2792         if (r)
2793                 return r;
2794
2795         r = gfx_v9_0_kiq_resume(adev);
2796         if (r)
2797                 return r;
2798
2799         ring = &adev->gfx.gfx_ring[0];
2800         r = amdgpu_ring_test_ring(ring);
2801         if (r) {
2802                 ring->ready = false;
2803                 return r;
2804         }
2805
2806         ring = &adev->gfx.kiq.ring;
2807         ring->ready = true;
2808         r = amdgpu_ring_test_ring(ring);
2809         if (r)
2810                 ring->ready = false;
2811
2812         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2813                 ring = &adev->gfx.compute_ring[i];
2814
2815                 ring->ready = true;
2816                 r = amdgpu_ring_test_ring(ring);
2817                 if (r)
2818                         ring->ready = false;
2819         }
2820
2821         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2822
2823         return 0;
2824 }
2825
2826 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2827 {
2828         gfx_v9_0_cp_gfx_enable(adev, enable);
2829         gfx_v9_0_cp_compute_enable(adev, enable);
2830 }
2831
2832 static int gfx_v9_0_hw_init(void *handle)
2833 {
2834         int r;
2835         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2836
2837         gfx_v9_0_init_golden_registers(adev);
2838
2839         gfx_v9_0_gpu_init(adev);
2840
2841         r = gfx_v9_0_rlc_resume(adev);
2842         if (r)
2843                 return r;
2844
2845         r = gfx_v9_0_cp_resume(adev);
2846         if (r)
2847                 return r;
2848
2849         r = gfx_v9_0_ngg_en(adev);
2850         if (r)
2851                 return r;
2852
2853         return 0;
2854 }
2855
2856 static int gfx_v9_0_hw_fini(void *handle)
2857 {
2858         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2859
2860         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2861         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2862         if (amdgpu_sriov_vf(adev)) {
2863                 gfx_v9_0_cp_gfx_enable(adev, false);
2864                 /* Polling must be disabled for SR-IOV once the hw is done;
2865                  * otherwise the CPC engine may keep fetching a WB address
2866                  * that is already invalid after the sw side has finished,
2867                  * triggering DMAR read errors on the hypervisor side.
2868                  */
2869                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2870                 return 0;
2871         }
2872         gfx_v9_0_cp_enable(adev, false);
2873         gfx_v9_0_rlc_stop(adev);
2874
2875         return 0;
2876 }
2877
2878 static int gfx_v9_0_suspend(void *handle)
2879 {
2880         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2881
2882         adev->gfx.in_suspend = true;
2883         return gfx_v9_0_hw_fini(adev);
2884 }
2885
2886 static int gfx_v9_0_resume(void *handle)
2887 {
2888         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2889         int r;
2890
2891         r = gfx_v9_0_hw_init(adev);
2892         adev->gfx.in_suspend = false;
2893         return r;
2894 }
2895
2896 static bool gfx_v9_0_is_idle(void *handle)
2897 {
2898         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2899
2900         return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
2901                               GRBM_STATUS, GUI_ACTIVE);
2905 }
2906
2907 static int gfx_v9_0_wait_for_idle(void *handle)
2908 {
2909         unsigned i;
2910         u32 tmp;
2911         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2912
2913         for (i = 0; i < adev->usec_timeout; i++) {
2914                 /* read GRBM_STATUS */
2915                 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
2916                         GRBM_STATUS__GUI_ACTIVE_MASK;
2917
2918                 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
2919                         return 0;
2920                 udelay(1);
2921         }
2922         return -ETIMEDOUT;
2923 }
2924
2925 static int gfx_v9_0_soft_reset(void *handle)
2926 {
2927         u32 grbm_soft_reset = 0;
2928         u32 tmp;
2929         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2930
2931         /* GRBM_STATUS */
2932         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
2933         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2934                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2935                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2936                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2937                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2938                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2939                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2940                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2941                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2942                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2943         }
2944
2945         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2946                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2947                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2948         }
2949
2950         /* GRBM_STATUS2 */
2951         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
2952         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2953                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2954                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2955
2957         if (grbm_soft_reset) {
2958                 /* stop the rlc */
2959                 gfx_v9_0_rlc_stop(adev);
2960
2961                 /* Disable GFX parsing/prefetching */
2962                 gfx_v9_0_cp_gfx_enable(adev, false);
2963
2964                 /* Disable MEC parsing/prefetching */
2965                 gfx_v9_0_cp_compute_enable(adev, false);
2966
2967                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2968                 tmp |= grbm_soft_reset;
2969                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2970                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2971                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2972
2973                 udelay(50);
2974
2975                 tmp &= ~grbm_soft_reset;
2976                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
2977                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
2980
2981                 /* Wait a little for things to settle down */
2982                 udelay(50);
2983         }
2984         return 0;
2985 }
2986
2987 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2988 {
2989         uint64_t clock;
2990
2991         mutex_lock(&adev->gfx.gpu_clock_mutex);
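             /* writing 1 latches the free-running counter so that the LSB/MSB
              * reads below see one consistent snapshot; the mutex serializes
              * concurrent captures
              */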
2992         WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
2993         clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
2994                 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2995         mutex_unlock(&adev->gfx.gpu_clock_mutex);
2996         return clock;
2997 }
2998
2999 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3000                                           uint32_t vmid,
3001                                           uint32_t gds_base, uint32_t gds_size,
3002                                           uint32_t gws_base, uint32_t gws_size,
3003                                           uint32_t oa_base, uint32_t oa_size)
3004 {
3005         gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3006         gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3007
3008         gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3009         gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3010
3011         oa_base = oa_base >> AMDGPU_OA_SHIFT;
3012         oa_size = oa_size >> AMDGPU_OA_SHIFT;
3013
3014         /* GDS Base */
3015         gfx_v9_0_write_data_to_reg(ring, 0, false,
3016                                    amdgpu_gds_reg_offset[vmid].mem_base,
3017                                    gds_base);
3018
3019         /* GDS Size */
3020         gfx_v9_0_write_data_to_reg(ring, 0, false,
3021                                    amdgpu_gds_reg_offset[vmid].mem_size,
3022                                    gds_size);
3023
3024         /* GWS */
3025         gfx_v9_0_write_data_to_reg(ring, 0, false,
3026                                    amdgpu_gds_reg_offset[vmid].gws,
3027                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3028
3029         /* OA */
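             /* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
              * mask of oa_size bits starting at bit oa_base, e.g. base 2 and
              * size 3 give 0x1c
              */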
3030         gfx_v9_0_write_data_to_reg(ring, 0, false,
3031                                    amdgpu_gds_reg_offset[vmid].oa,
3032                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
3033 }
3034
3035 static int gfx_v9_0_early_init(void *handle)
3036 {
3037         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3038
3039         adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3040         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3041         gfx_v9_0_set_ring_funcs(adev);
3042         gfx_v9_0_set_irq_funcs(adev);
3043         gfx_v9_0_set_gds_init(adev);
3044         gfx_v9_0_set_rlc_funcs(adev);
3045
3046         return 0;
3047 }
3048
3049 static int gfx_v9_0_late_init(void *handle)
3050 {
3051         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3052         int r;
3053
3054         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3055         if (r)
3056                 return r;
3057
3058         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3059         if (r)
3060                 return r;
3061
3062         return 0;
3063 }
3064
3065 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3066 {
3067         uint32_t rlc_setting, data;
3068         unsigned i;
3069
3070         if (adev->gfx.rlc.in_safe_mode)
3071                 return;
3072
3073         /* if RLC is not enabled, do nothing */
3074         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3075         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3076                 return;
3077
3078         if (adev->cg_flags &
3079             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3080              AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3081                 data = RLC_SAFE_MODE__CMD_MASK;
3082                 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
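                     /* CMD latches the request and MESSAGE = 1 asks for entry;
                      * the RLC clears CMD once it is done, which is what the
                      * poll below waits for
                      */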
3083                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3084
3085                 /* wait for RLC_SAFE_MODE */
3086                 for (i = 0; i < adev->usec_timeout; i++) {
3087                         if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3088                                 break;
3089                         udelay(1);
3090                 }
3091                 adev->gfx.rlc.in_safe_mode = true;
3092         }
3093 }
3094
3095 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3096 {
3097         uint32_t rlc_setting, data;
3098
3099         if (!adev->gfx.rlc.in_safe_mode)
3100                 return;
3101
3102         /* if RLC is not enabled, do nothing */
3103         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3104         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3105                 return;
3106
3107         if (adev->cg_flags &
3108             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3109                 /*
3110                  * Try to exit safe mode only if it is already in safe
3111                  * mode.
3112                  */
3113                 data = RLC_SAFE_MODE__CMD_MASK;
3114                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3115                 adev->gfx.rlc.in_safe_mode = false;
3116         }
3117 }
3118
3119 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3120                                                 bool enable)
3121 {
3122         /* TODO: double check if we need to perform under safe mode */
3123         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3124
3125         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3126                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3127                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3128                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3129         } else {
3130                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3131                 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3132         }
3133
3134         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3135 }
3136
3137 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3138                                                 bool enable)
3139 {
3140         /* TODO: double check if we need to perform under safe mode */
3141         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3142
3143         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3144                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3145         else
3146                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3147
3148         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3149                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3150         else
3151                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3152
3153         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3154 }
3155
3156 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3157                                                       bool enable)
3158 {
3159         uint32_t data, def;
3160
3161         /* It is disabled by HW by default */
3162         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3163                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3164                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3165                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3166                           RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3167                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3168                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3169
3170                 /* only for Vega10 & Raven1 */
3171                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3172
3173                 if (def != data)
3174                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3175
3176                 /* MGLS is a global flag to control all MGLS in GFX */
3177                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3178                         /* 2 - RLC memory Light sleep */
3179                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3180                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3181                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3182                                 if (def != data)
3183                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3184                         }
3185                         /* 3 - CP memory Light sleep */
3186                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3187                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3188                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3189                                 if (def != data)
3190                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3191                         }
3192                 }
3193         } else {
3194                 /* 1 - MGCG_OVERRIDE */
3195                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3196                 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3197                          RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3198                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3199                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3200                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3201                 if (def != data)
3202                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3203
3204                 /* 2 - disable MGLS in RLC */
3205                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3206                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3207                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3208                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3209                 }
3210
3211                 /* 3 - disable MGLS in CP */
3212                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3213                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3214                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3215                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3216                 }
3217         }
3218 }
3219
3220 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3221                                            bool enable)
3222 {
3223         uint32_t data, def;
3224
3225         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3226
3227         /* Enable 3D CGCG/CGLS */
3228         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3229                 /* write cmd to clear cgcg/cgls ov */
3230                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3231                 /* unset CGCG override */
3232                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3233                 /* update CGCG and CGLS override bits */
3234                 if (def != data)
3235                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3236                 /* enable 3D CGCG FSM (0x0020003f) */
3237                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3238                 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3239                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3240                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3241                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3242                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
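                     /* with CGLS enabled this works out to the 0x0020003f noted
                      * above: idle threshold 0x2000, repeater compensation
                      * delay 0xf, plus the CGCG and CGLS enable bits
                      */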
3243                 if (def != data)
3244                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3245
3246                 /* set IDLE_POLL_COUNT(0x00900100) */
3247                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3248                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3249                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3250                 if (def != data)
3251                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3252         } else {
3253                 /* Disable CGCG/CGLS */
3254                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3255                 /* disable cgcg, cgls should be disabled */
3256                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3257                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3258                 /* disable cgcg and cgls in FSM */
3259                 if (def != data)
3260                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3261         }
3262
3263         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3264 }
3265
3266 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3267                                                       bool enable)
3268 {
3269         uint32_t def, data;
3270
3271         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3272
3273         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3274                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3275                 /* unset CGCG override */
3276                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3277                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3278                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3279                 else
3280                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3281                 /* update CGCG and CGLS override bits */
3282                 if (def != data)
3283                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3284
3285                 /* enable cgcg FSM(0x0020003F) */
3286                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3287                 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3288                         RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3289                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3290                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3291                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3292                 if (def != data)
3293                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3294
3295                 /* set IDLE_POLL_COUNT(0x00900100) */
3296                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3297                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3298                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3299                 if (def != data)
3300                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3301         } else {
3302                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3303                 /* reset CGCG/CGLS bits */
3304                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3305                 /* disable cgcg and cgls in FSM */
3306                 if (def != data)
3307                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3308         }
3309
3310         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3311 }
3312
3313 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3314                                             bool enable)
3315 {
3316         if (enable) {
3317                 /* CGCG/CGLS should be enabled after MGCG/MGLS
3318                  * ===  MGCG + MGLS ===
3319                  */
3320                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3321                 /* ===  CGCG/CGLS for GFX 3D Only === */
3322                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3323                 /* ===  CGCG + CGLS === */
3324                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3325         } else {
3326                 /* CGCG/CGLS should be disabled before MGCG/MGLS
3327                  * ===  CGCG + CGLS ===
3328                  */
3329                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3330                 /* ===  CGCG/CGLS for GFX 3D Only === */
3331                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3332                 /* ===  MGCG + MGLS === */
3333                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3334         }
3335         return 0;
3336 }
3337
3338 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3339         .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3340         .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3341 };
3342
3343 static int gfx_v9_0_set_powergating_state(void *handle,
3344                                           enum amd_powergating_state state)
3345 {
3346         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3347         bool enable = (state == AMD_PG_STATE_GATE);
3348
3349         switch (adev->asic_type) {
3350         case CHIP_RAVEN:
3351                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3352                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3353                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3354                 } else {
3355                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3356                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3357                 }
3358
3359                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3360                         gfx_v9_0_enable_cp_power_gating(adev, true);
3361                 else
3362                         gfx_v9_0_enable_cp_power_gating(adev, false);
3363
3364                 /* update gfx cgpg state */
3365                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3366
3367                 /* update mgcg state */
3368                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3369                 break;
3370         default:
3371                 break;
3372         }
3373
3374         return 0;
3375 }
3376
3377 static int gfx_v9_0_set_clockgating_state(void *handle,
3378                                           enum amd_clockgating_state state)
3379 {
3380         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3381
3382         if (amdgpu_sriov_vf(adev))
3383                 return 0;
3384
3385         switch (adev->asic_type) {
3386         case CHIP_VEGA10:
3387         case CHIP_RAVEN:
3388                 gfx_v9_0_update_gfx_clock_gating(adev,
3389                                                  state == AMD_CG_STATE_GATE);
3390                 break;
3391         default:
3392                 break;
3393         }
3394         return 0;
3395 }
3396
3397 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3398 {
3399         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3400         int data;
3401
3402         if (amdgpu_sriov_vf(adev))
3403                 *flags = 0;
3404
3405         /* AMD_CG_SUPPORT_GFX_MGCG */
3406         data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3407         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3408                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3409
3410         /* AMD_CG_SUPPORT_GFX_CGCG */
3411         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3412         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3413                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3414
3415         /* AMD_CG_SUPPORT_GFX_CGLS */
3416         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3417                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3418
3419         /* AMD_CG_SUPPORT_GFX_RLC_LS */
3420         data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3421         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3422                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3423
3424         /* AMD_CG_SUPPORT_GFX_CP_LS */
3425         data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3426         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3427                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3428
3429         /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3430         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3431         if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3432                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3433
3434         /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3435         if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3436                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3437 }
3438
3439 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3440 {
3441         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
3442 }
3443
3444 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3445 {
3446         struct amdgpu_device *adev = ring->adev;
3447         u64 wptr;
3448
3449         /* XXX check if swapping is necessary on BE */
3450         if (ring->use_doorbell) {
3451                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3452         } else {
3453                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3454                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3455         }
3456
3457         return wptr;
3458 }
3459
3460 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3461 {
3462         struct amdgpu_device *adev = ring->adev;
3463
3464         if (ring->use_doorbell) {
3465                 /* XXX check if swapping is necessary on BE */
3466                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3467                 WDOORBELL64(ring->doorbell_index, ring->wptr);
3468         } else {
3469                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3470                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3471         }
3472 }
3473
3474 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3475 {
3476         u32 ref_and_mask, reg_mem_engine;
3477         struct nbio_hdp_flush_reg *nbio_hf_reg;
3478
3479         /* only CHIP_VEGA10 is wired up here; default to its NBIO v6.1
3480          * registers so the pointer is never read uninitialized on other
3481          * ASICs
3482          */
3483         nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
3481
3482         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3483                 switch (ring->me) {
3484                 case 1:
3485                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3486                         break;
3487                 case 2:
3488                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3489                         break;
3490                 default:
3491                         return;
3492                 }
3493                 reg_mem_engine = 0;
3494         } else {
3495                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3496                 reg_mem_engine = 1; /* pfp */
3497         }
3498
3499         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3500                               nbio_hf_reg->hdp_flush_req_offset,
3501                               nbio_hf_reg->hdp_flush_done_offset,
3502                               ref_and_mask, ref_and_mask, 0x20);
3503 }
3504
3505 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3506 {
3507         gfx_v9_0_write_data_to_reg(ring, 0, true,
3508                                    SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
3509 }
3510
3511 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3512                                       struct amdgpu_ib *ib,
3513                                       unsigned vm_id, bool ctx_switch)
3514 {
3515         u32 header, control = 0;
3516
3517         if (ib->flags & AMDGPU_IB_FLAG_CE)
3518                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3519         else
3520                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3521
3522         control |= ib->length_dw | (vm_id << 24);
3523
3524         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3525                 control |= INDIRECT_BUFFER_PRE_ENB(1);
3526
3527                 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3528                         gfx_v9_0_ring_emit_de_meta(ring);
3529         }
3530
3531         amdgpu_ring_write(ring, header);
3532         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3533         amdgpu_ring_write(ring,
3534 #ifdef __BIG_ENDIAN
3535                 (2 << 0) |
3536 #endif
3537                 lower_32_bits(ib->gpu_addr));
3538         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3539         amdgpu_ring_write(ring, control);
3540 }
3541
3542 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3543                                           struct amdgpu_ib *ib,
3544                                           unsigned vm_id, bool ctx_switch)
3545 {
3546         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3547
3548         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3549         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3550         amdgpu_ring_write(ring,
3551 #ifdef __BIG_ENDIAN
3552                                 (2 << 0) |
3553 #endif
3554                                 lower_32_bits(ib->gpu_addr));
3555         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3556         amdgpu_ring_write(ring, control);
3557 }
3558
3559 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3560                                      u64 seq, unsigned flags)
3561 {
3562         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3563         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3564
3565         /* RELEASE_MEM - flush caches, send int */
3566         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3567         amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3568                                  EOP_TC_ACTION_EN |
3569                                  EOP_TC_WB_ACTION_EN |
3570                                  EOP_TC_MD_ACTION_EN |
3571                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3572                                  EVENT_INDEX(5)));
3573         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3574
3575         /*
3576          * The address must be qword aligned for a 64-bit write, and dword
3577          * aligned when only the low 32 bits are sent (data high discarded).
3578          */
3579         if (write64bit)
3580                 BUG_ON(addr & 0x7);
3581         else
3582                 BUG_ON(addr & 0x3);
3583         amdgpu_ring_write(ring, lower_32_bits(addr));
3584         amdgpu_ring_write(ring, upper_32_bits(addr));
3585         amdgpu_ring_write(ring, lower_32_bits(seq));
3586         amdgpu_ring_write(ring, upper_32_bits(seq));
3587         amdgpu_ring_write(ring, 0);
3588 }
3589
3590 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3591 {
3592         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3593         uint32_t seq = ring->fence_drv.sync_seq;
3594         uint64_t addr = ring->fence_drv.gpu_addr;
3595
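             /* stall the ring until the fence writeback slot reaches the last
              * emitted sync_seq
              */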
3596         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3597                               lower_32_bits(addr), upper_32_bits(addr),
3598                               seq, 0xffffffff, 4);
3599 }
3600
3601 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3602                                         unsigned vm_id, uint64_t pd_addr)
3603 {
3604         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
3605         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3606         uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3607         unsigned eng = ring->vm_inv_eng;
3608
3609         pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
3610         pd_addr |= AMDGPU_PTE_VALID;
3611
3612         gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3613                                    hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
3614                                    lower_32_bits(pd_addr));
3615
3616         gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3617                                    hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
3618                                    upper_32_bits(pd_addr));
3619
3620         gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3621                                    hub->vm_inv_eng0_req + eng, req);
3622
3623         /* wait for the invalidate to complete */
3624         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
3625                               eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
3626
3627         /* compute doesn't have PFP */
3628         if (usepfp) {
3629                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3630                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3631                 amdgpu_ring_write(ring, 0x0);
3632         }
3633 }
3634
3635 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
3636 {
3637         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
3638 }
3639
3640 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
3641 {
3642         u64 wptr;
3643
3644         /* XXX check if swapping is necessary on BE */
3645         if (ring->use_doorbell)
3646                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
3647         else
3648                 BUG();
3649         return wptr;
3650 }
3651
3652 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
3653 {
3654         struct amdgpu_device *adev = ring->adev;
3655
3656         /* XXX check if swapping is necessary on BE */
3657         if (ring->use_doorbell) {
3658                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3659                 WDOORBELL64(ring->doorbell_index, ring->wptr);
3660         } else {
3661                 BUG(); /* only DOORBELL method supported on gfx9 now */
3662         }
3663 }
3664
3665 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3666                                          u64 seq, unsigned int flags)
3667 {
3668         /* we only allocate 32bit for each seq wb address */
3669         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3670
3671         /* write fence seq to the "addr" */
3672         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3673         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3674                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3675         amdgpu_ring_write(ring, lower_32_bits(addr));
3676         amdgpu_ring_write(ring, upper_32_bits(addr));
3677         amdgpu_ring_write(ring, lower_32_bits(seq));
3678
3679         if (flags & AMDGPU_FENCE_FLAG_INT) {
3680                 /* set register to trigger INT */
3681                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3682                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3683                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3684                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
3685                 amdgpu_ring_write(ring, 0);
3686                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3687         }
3688 }
3689
3690 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
3691 {
3692         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3693         amdgpu_ring_write(ring, 0);
3694 }
3695
3696 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
3697 {
3698         static struct v9_ce_ib_state ce_payload = {0};
3699         uint64_t csa_addr;
3700         int cnt;
3701
3702         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
3703         csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
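             /* the CSA lives two pages below the top of the reserved VA range;
              * emit_de_meta() places the GDS backup in the page right above it
              */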
3704
3705         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3706         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3707                                  WRITE_DATA_DST_SEL(8) |
3708                                  WR_CONFIRM) |
3709                                  WRITE_DATA_CACHE_POLICY(0));
3710         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3711         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
3712         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
3713 }
3714
3715 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
3716 {
3717         static struct v9_de_ib_state de_payload = {0};
3718         uint64_t csa_addr, gds_addr;
3719         int cnt;
3720
3721         csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
3722         gds_addr = csa_addr + 4096;
3723         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
3724         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
3725
3726         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
3727         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
3728         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
3729                                  WRITE_DATA_DST_SEL(8) |
3730                                  WR_CONFIRM) |
3731                                  WRITE_DATA_CACHE_POLICY(0));
3732         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
3733         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
3734         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
3735 }
3736
3737 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
3738 {
3739         uint32_t dw2 = 0;
3740
3741         if (amdgpu_sriov_vf(ring->adev))
3742                 gfx_v9_0_ring_emit_ce_meta(ring);
3743
3744         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
3745         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
3746                 /* set load_global_config & load_global_uconfig */
3747                 dw2 |= 0x8001;
3748                 /* set load_cs_sh_regs */
3749                 dw2 |= 0x01000000;
3750                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
3751                 dw2 |= 0x10002;
3752
3753                 /* set load_ce_ram if preamble presented */
3754                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
3755                         dw2 |= 0x10000000;
3756         } else {
3757                 /* still load_ce_ram the first time a preamble is presented,
3758                  * even though no context switch happens.
3759                  */
3760                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
3761                         dw2 |= 0x10000000;
3762         }
3763
3764         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3765         amdgpu_ring_write(ring, dw2);
3766         amdgpu_ring_write(ring, 0);
3767 }
3768
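/*
 * COND_EXEC predication, first half: tell the CP to skip the following
 * dwords whenever *cond_exe_gpu_addr reads back as 0.  The dword count
 * is not known yet, so a 0x55aa55aa placeholder is emitted and its ring
 * offset returned; gfx_v9_0_ring_emit_patch_cond_exec() writes the real
 * count there once the conditional section is complete.
 */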
3769 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
3770 {
3771         unsigned ret;
3772         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
3773         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
3774         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
3775         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
3776         ret = ring->wptr & ring->buf_mask;
3777         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
3778         return ret;
3779 }
3780
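/*
 * COND_EXEC predication, second half: patch the placeholder with the
 * distance in dwords from the placeholder to the current write pointer.
 * If the ring wrapped in between, go to the end of the buffer and back
 * around: e.g. a 1024-dword ring with offset 1020 and cur 6 yields
 * 1024 - 1020 + 6 = 10 dwords to skip (sizes here are illustrative).
 */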
3781 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
3782 {
3783         unsigned cur;
3784         BUG_ON(offset > ring->buf_mask);
3785         BUG_ON(ring->ring[offset] != 0x55aa55aa);
3786
3787         cur = (ring->wptr & ring->buf_mask) - 1;
3788         if (likely(cur > offset))
3789                 ring->ring[offset] = cur - offset;
3790         else
3791                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
3792 }
3793
3794 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
3795 {
3796         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
3797         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_begin, 1: frame_end */
3798 }
3799
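/*
 * KIQ register read: COPY_DATA moves the register value into the
 * writeback page at dword slot reg_val_offs (src_sel 0 = register,
 * dst_sel 5 = memory, bit 20 = write confirm, per the inline comments
 * below), so the requester can fetch the result from CPU-visible memory
 * once the ring has executed the packet.
 */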
3800 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
3801 {
3802         struct amdgpu_device *adev = ring->adev;
3803
3804         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
3805         amdgpu_ring_write(ring, 0 |     /* src: register */
3806                                 (5 << 8) |      /* dst: memory */
3807                                 (1 << 20));     /* write confirm */
3808         amdgpu_ring_write(ring, reg);
3809         amdgpu_ring_write(ring, 0);
3810         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
3811                                 adev->virt.reg_val_offs * 4));
3812         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
3813                                 adev->virt.reg_val_offs * 4));
3814 }
3815
3816 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3817                                   uint32_t val)
3818 {
3819         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3820         amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
3821         amdgpu_ring_write(ring, reg);
3822         amdgpu_ring_write(ring, 0);
3823         amdgpu_ring_write(ring, val);
3824 }
3825
3826 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
3827                                                  enum amdgpu_interrupt_state state)
3828 {
3829         switch (state) {
3830         case AMDGPU_IRQ_STATE_DISABLE:
3831         case AMDGPU_IRQ_STATE_ENABLE:
3832                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3833                                TIME_STAMP_INT_ENABLE,
3834                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3835                 break;
3836         default:
3837                 break;
3838         }
3839 }
3840
3841 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3842                                                      int me, int pipe,
3843                                                      enum amdgpu_interrupt_state state)
3844 {
3845         u32 mec_int_cntl, mec_int_cntl_reg;
3846
3847         /*
3848          * amdgpu controls only the first MEC. That's why this function only
3849          * handles the setting of interrupts for this specific MEC. All other
3850          * pipes' interrupts are set by amdkfd.
3851          */
3852
3853         if (me == 1) {
3854                 switch (pipe) {
3855                 case 0:
3856                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
3857                         break;
3858                 case 1:
3859                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
3860                         break;
3861                 case 2:
3862                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
3863                         break;
3864                 case 3:
3865                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
3866                         break;
3867                 default:
3868                         DRM_DEBUG("invalid pipe %d\n", pipe);
3869                         return;
3870                 }
3871         } else {
3872                 DRM_DEBUG("invalid me %d\n", me);
3873                 return;
3874         }
3875
3876         switch (state) {
3877         case AMDGPU_IRQ_STATE_DISABLE:
3878                 mec_int_cntl = RREG32(mec_int_cntl_reg);
3879                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3880                                              TIME_STAMP_INT_ENABLE, 0);
3881                 WREG32(mec_int_cntl_reg, mec_int_cntl);
3882                 break;
3883         case AMDGPU_IRQ_STATE_ENABLE:
3884                 mec_int_cntl = RREG32(mec_int_cntl_reg);
3885                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3886                                              TIME_STAMP_INT_ENABLE, 1);
3887                 WREG32(mec_int_cntl_reg, mec_int_cntl);
3888                 break;
3889         default:
3890                 break;
3891         }
3892 }
3893
3894 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
3895                                              struct amdgpu_irq_src *source,
3896                                              unsigned type,
3897                                              enum amdgpu_interrupt_state state)
3898 {
3899         switch (state) {
3900         case AMDGPU_IRQ_STATE_DISABLE:
3901         case AMDGPU_IRQ_STATE_ENABLE:
3902                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3903                                PRIV_REG_INT_ENABLE,
3904                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3905                 break;
3906         default:
3907                 break;
3908         }
3909
3910         return 0;
3911 }
3912
3913 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
3914                                               struct amdgpu_irq_src *source,
3915                                               unsigned type,
3916                                               enum amdgpu_interrupt_state state)
3917 {
3918         switch (state) {
3919         case AMDGPU_IRQ_STATE_DISABLE:
3920         case AMDGPU_IRQ_STATE_ENABLE:
3921                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3922                                PRIV_INSTR_INT_ENABLE,
3923                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                     break;
3924         default:
3925                 break;
3926         }
3927
3928         return 0;
3929 }
3930
3931 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
3932                                             struct amdgpu_irq_src *src,
3933                                             unsigned type,
3934                                             enum amdgpu_interrupt_state state)
3935 {
3936         switch (type) {
3937         case AMDGPU_CP_IRQ_GFX_EOP:
3938                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
3939                 break;
3940         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3941                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
3942                 break;
3943         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3944                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
3945                 break;
3946         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3947                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
3948                 break;
3949         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3950                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
3951                 break;
3952         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3953                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
3954                 break;
3955         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3956                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
3957                 break;
3958         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3959                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
3960                 break;
3961         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3962                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
3963                 break;
3964         default:
3965                 break;
3966         }
3967         return 0;
3968 }
3969
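/*
 * The low byte of the IV entry's ring_id identifies the source queue:
 * bits [1:0] pipe, bits [3:2] ME (0 = GFX, 1/2 = MEC), bits [6:4]
 * queue.  A GFX EOP completes fences on the single gfx ring; compute
 * EOPs are matched against each compute ring's me/pipe/queue triple.
 */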
3970 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
3971                             struct amdgpu_irq_src *source,
3972                             struct amdgpu_iv_entry *entry)
3973 {
3974         int i;
3975         u8 me_id, pipe_id, queue_id;
3976         struct amdgpu_ring *ring;
3977
3978         DRM_DEBUG("IH: CP EOP\n");
3979         me_id = (entry->ring_id & 0x0c) >> 2;
3980         pipe_id = (entry->ring_id & 0x03) >> 0;
3981         queue_id = (entry->ring_id & 0x70) >> 4;
3982
3983         switch (me_id) {
3984         case 0:
3985                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3986                 break;
3987         case 1:
3988         case 2:
3989                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3990                         ring = &adev->gfx.compute_ring[i];
3991                         /* Per-queue interrupt is supported for MEC starting from VI.
3992                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
3993                          */
3994                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3995                                 amdgpu_fence_process(ring);
3996                 }
3997                 break;
3998         }
3999         return 0;
4000 }
4001
4002 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
4003                                  struct amdgpu_irq_src *source,
4004                                  struct amdgpu_iv_entry *entry)
4005 {
4006         DRM_ERROR("Illegal register access in command stream\n");
4007         schedule_work(&adev->reset_work);
4008         return 0;
4009 }
4010
4011 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4012                                   struct amdgpu_irq_src *source,
4013                                   struct amdgpu_iv_entry *entry)
4014 {
4015         DRM_ERROR("Illegal instruction in command stream\n");
4016         schedule_work(&adev->reset_work);
4017         return 0;
4018 }
4019
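/*
 * The KIQ's per-pipe INT_CNTL register is located by indexing off the
 * PIPE0 register of its ME, which relies on the PIPE0..PIPE3 INT_CNTL
 * offsets being consecutive.  The CP_ME2_PIPE0_INT_CNTL field macros
 * are reused for whichever register ends up targeted, since the bit
 * layout is identical; both the global CPC_INT_CNTL and the pipe
 * register must enable GENERIC2_INT, the only interrupt the KIQ uses.
 */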
4020 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
4021                                             struct amdgpu_irq_src *src,
4022                                             unsigned int type,
4023                                             enum amdgpu_interrupt_state state)
4024 {
4025         uint32_t tmp, target;
4026         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4027
4028         if (ring->me == 1)
4029                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4030         else
4031                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
4032         target += ring->pipe;
4033
4034         switch (type) {
4035         case AMDGPU_CP_KIQ_IRQ_DRIVER0:
4036                 if (state == AMDGPU_IRQ_STATE_DISABLE) {
4037                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4038                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4039                                                  GENERIC2_INT_ENABLE, 0);
4040                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4041
4042                         tmp = RREG32(target);
4043                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4044                                                  GENERIC2_INT_ENABLE, 0);
4045                         WREG32(target, tmp);
4046                 } else {
4047                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4048                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4049                                                  GENERIC2_INT_ENABLE, 1);
4050                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4051
4052                         tmp = RREG32(target);
4053                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4054                                                  GENERIC2_INT_ENABLE, 1);
4055                         WREG32(target, tmp);
4056                 }
4057                 break;
4058         default:
4059                 BUG(); /* kiq only supports GENERIC2_INT now */
4060                 break;
4061         }
4062         return 0;
4063 }
4064
4065 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
4066                             struct amdgpu_irq_src *source,
4067                             struct amdgpu_iv_entry *entry)
4068 {
4069         u8 me_id, pipe_id, queue_id;
4070         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4071
4072         me_id = (entry->ring_id & 0x0c) >> 2;
4073         pipe_id = (entry->ring_id & 0x03) >> 0;
4074         queue_id = (entry->ring_id & 0x70) >> 4;
4075         DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
4076                    me_id, pipe_id, queue_id);
4077
4078         amdgpu_fence_process(ring);
4079         return 0;
4080 }
4081
4082 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4083         .name = "gfx_v9_0",
4084         .early_init = gfx_v9_0_early_init,
4085         .late_init = gfx_v9_0_late_init,
4086         .sw_init = gfx_v9_0_sw_init,
4087         .sw_fini = gfx_v9_0_sw_fini,
4088         .hw_init = gfx_v9_0_hw_init,
4089         .hw_fini = gfx_v9_0_hw_fini,
4090         .suspend = gfx_v9_0_suspend,
4091         .resume = gfx_v9_0_resume,
4092         .is_idle = gfx_v9_0_is_idle,
4093         .wait_for_idle = gfx_v9_0_wait_for_idle,
4094         .soft_reset = gfx_v9_0_soft_reset,
4095         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
4096         .set_powergating_state = gfx_v9_0_set_powergating_state,
4097         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
4098 };
4099
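/*
 * emit_frame_size in the ring funcs below is the worst-case number of
 * dwords the emit_* callbacks may add around a submission, so the ring
 * code can reserve enough space before emitting a job; the gfx ring's
 * budget is itemized packet by packet.
 */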
4100 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4101         .type = AMDGPU_RING_TYPE_GFX,
4102         .align_mask = 0xff,
4103         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4104         .support_64bit_ptrs = true,
4105         .vmhub = AMDGPU_GFXHUB,
4106         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
4107         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
4108         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
4109         .emit_frame_size = /* 242 dwords maximum if 16 IBs */
4110                 5 +  /* COND_EXEC */
4111                 7 +  /* PIPELINE_SYNC */
4112                 24 + /* VM_FLUSH */
4113                 8 +  /* FENCE for VM_FLUSH */
4114                 20 + /* GDS switch */
4115                 4 +  /* double SWITCH_BUFFER,
4116                       * the first COND_EXEC jumps to the place just
4117                       * prior to this double SWITCH_BUFFER */
4118                 5 +  /* COND_EXEC */
4119                 7 +  /* HDP_flush */
4120                 4 +  /* VGT_flush */
4121                 14 + /* CE_META */
4122                 31 + /* DE_META */
4123                 3 +  /* CNTX_CTRL */
4124                 5 +  /* HDP_INVL */
4125                 8 + 8 + /* FENCE x2 */
4126                 2, /* SWITCH_BUFFER */
4127         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
4128         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
4129         .emit_fence = gfx_v9_0_ring_emit_fence,
4130         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4131         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4132         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4133         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4134         .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
4135         .test_ring = gfx_v9_0_ring_test_ring,
4136         .test_ib = gfx_v9_0_ring_test_ib,
4137         .insert_nop = amdgpu_ring_insert_nop,
4138         .pad_ib = amdgpu_ring_generic_pad_ib,
4139         .emit_switch_buffer = gfx_v9_ring_emit_sb,
4140         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
4141         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4142         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4143         .emit_tmz = gfx_v9_0_ring_emit_tmz,
4144 };
4145
4146 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4147         .type = AMDGPU_RING_TYPE_COMPUTE,
4148         .align_mask = 0xff,
4149         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4150         .support_64bit_ptrs = true,
4151         .vmhub = AMDGPU_GFXHUB,
4152         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4153         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4154         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4155         .emit_frame_size =
4156                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4157                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4158                 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
4159                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4160                 24 + /* gfx_v9_0_ring_emit_vm_flush */
4161                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4162         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4163         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4164         .emit_fence = gfx_v9_0_ring_emit_fence,
4165         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4166         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4167         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4168         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4169         .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
4170         .test_ring = gfx_v9_0_ring_test_ring,
4171         .test_ib = gfx_v9_0_ring_test_ib,
4172         .insert_nop = amdgpu_ring_insert_nop,
4173         .pad_ib = amdgpu_ring_generic_pad_ib,
4174 };
4175
4176 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4177         .type = AMDGPU_RING_TYPE_KIQ,
4178         .align_mask = 0xff,
4179         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4180         .support_64bit_ptrs = true,
4181         .vmhub = AMDGPU_GFXHUB,
4182         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4183         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4184         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4185         .emit_frame_size =
4186                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4187                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4188                 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
4189                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4190                 24 + /* gfx_v9_0_ring_emit_vm_flush */
4191                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4192         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4193         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4194         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
4195         .test_ring = gfx_v9_0_ring_test_ring,
4196         .test_ib = gfx_v9_0_ring_test_ib,
4197         .insert_nop = amdgpu_ring_insert_nop,
4198         .pad_ib = amdgpu_ring_generic_pad_ib,
4199         .emit_rreg = gfx_v9_0_ring_emit_rreg,
4200         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4201 };
4202
4203 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
4204 {
4205         int i;
4206
4207         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
4208
4209         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4210                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
4211
4212         for (i = 0; i < adev->gfx.num_compute_rings; i++)
4213                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
4214 }
4215
4216 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
4217         .set = gfx_v9_0_kiq_set_interrupt_state,
4218         .process = gfx_v9_0_kiq_irq,
4219 };
4220
4221 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
4222         .set = gfx_v9_0_set_eop_interrupt_state,
4223         .process = gfx_v9_0_eop_irq,
4224 };
4225
4226 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
4227         .set = gfx_v9_0_set_priv_reg_fault_state,
4228         .process = gfx_v9_0_priv_reg_irq,
4229 };
4230
4231 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
4232         .set = gfx_v9_0_set_priv_inst_fault_state,
4233         .process = gfx_v9_0_priv_inst_irq,
4234 };
4235
4236 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
4237 {
4238         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4239         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
4240
4241         adev->gfx.priv_reg_irq.num_types = 1;
4242         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
4243
4244         adev->gfx.priv_inst_irq.num_types = 1;
4245         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
4246
4247         adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
4248         adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
4249 }
4250
4251 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
4252 {
4253         switch (adev->asic_type) {
4254         case CHIP_VEGA10:
4255         case CHIP_RAVEN:
4256                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
4257                 break;
4258         default:
4259                 break;
4260         }
4261 }
4262
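/*
 * GDS_VMID0_SIZE at init time reports the total on-chip GDS memory.
 * A 64KB part is split evenly into 4KB partitions (64KB / 16 VMIDs);
 * the else branch provides fallback defaults for parts reporting a
 * different size.
 */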
4263 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
4264 {
4265         /* init asic gds info */
4266         adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
4267         adev->gds.gws.total_size = 64;
4268         adev->gds.oa.total_size = 16;
4269
4270         if (adev->gds.mem.total_size == 64 * 1024) {
4271                 adev->gds.mem.gfx_partition_size = 4096;
4272                 adev->gds.mem.cs_partition_size = 4096;
4273
4274                 adev->gds.gws.gfx_partition_size = 4;
4275                 adev->gds.gws.cs_partition_size = 4;
4276
4277                 adev->gds.oa.gfx_partition_size = 4;
4278                 adev->gds.oa.cs_partition_size = 1;
4279         } else {
4280                 adev->gds.mem.gfx_partition_size = 1024;
4281                 adev->gds.mem.cs_partition_size = 1024;
4282
4283                 adev->gds.gws.gfx_partition_size = 16;
4284                 adev->gds.gws.cs_partition_size = 16;
4285
4286                 adev->gds.oa.gfx_partition_size = 4;
4287                 adev->gds.oa.cs_partition_size = 4;
4288         }
4289 }
4290
4291 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4292                                                  u32 bitmap)
4293 {
4294         u32 data;
4295
4296         if (!bitmap)
4297                 return;
4298
4299         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4300         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4301
4302         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
4303 }
4304
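/*
 * Active CUs = ~(fuse-disabled | user-disabled), clipped to the
 * max_cu_per_sh bitmask: CC_GC_SHADER_ARRAY_CONFIG carries the
 * factory-fused inactive CUs, GC_USER_SHADER_ARRAY_CONFIG the ones
 * turned off via gfx_v9_0_set_user_cu_inactive_bitmap() above.
 */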
4305 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4306 {
4307         u32 data, mask;
4308
4309         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
4310         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
4311
4312         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4313         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4314
4315         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4316
4317         return (~data) & mask;
4318 }
4319
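/*
 * Walk every shader engine (SE) / shader array (SH), apply the
 * user-requested CU disables, and record per-SH active-CU bitmaps.
 * The "always on" mask packs one byte per SH, the bitmap for SE i /
 * SH j landing at bit offset i * 16 + j * 8, which is why only the
 * i < 2 && j < 2 arrays fit in the 32-bit ao_cu_mask.
 */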
4320 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
4321                                  struct amdgpu_cu_info *cu_info)
4322 {
4323         int i, j, k, counter, active_cu_number = 0;
4324         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4325         unsigned disable_masks[4 * 2];
4326
4327         if (!adev || !cu_info)
4328                 return -EINVAL;
4329
4330         amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
4331
4332         mutex_lock(&adev->grbm_idx_mutex);
4333         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4334                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4335                         mask = 1;
4336                         ao_bitmap = 0;
4337                         counter = 0;
4338                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
4339                         if (i < 4 && j < 2)
4340                                 gfx_v9_0_set_user_cu_inactive_bitmap(
4341                                         adev, disable_masks[i * 2 + j]);
4342                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
4343                         cu_info->bitmap[i][j] = bitmap;
4344
4345                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4346                                 if (bitmap & mask) {
4347                                         if (counter < adev->gfx.config.max_cu_per_sh)
4348                                                 ao_bitmap |= mask;
4349                                         counter++;
4350                                 }
4351                                 mask <<= 1;
4352                         }
4353                         active_cu_number += counter;
4354                         if (i < 2 && j < 2)
4355                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4356                         cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4357                 }
4358         }
4359         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
4360         mutex_unlock(&adev->grbm_idx_mutex);
4361
4362         cu_info->number = active_cu_number;
4363         cu_info->ao_cu_mask = ao_cu_mask;
4364
4365         return 0;
4366 }
4367
4368 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
4369 {
4370         .type = AMD_IP_BLOCK_TYPE_GFX,
4371         .major = 9,
4372         .minor = 0,
4373         .rev = 0,
4374         .funcs = &gfx_v9_0_ip_funcs,
4375 };