/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"
34 #include "uvd/uvd_4_2_d.h"
36 #include "dce/dce_8_0_d.h"
37 #include "dce/dce_8_0_sh_mask.h"
39 #include "bif/bif_4_1_d.h"
40 #include "bif/bif_4_1_sh_mask.h"
42 #include "gca/gfx_7_0_d.h"
43 #include "gca/gfx_7_2_enum.h"
44 #include "gca/gfx_7_2_sh_mask.h"
46 #include "gmc/gmc_7_0_d.h"
47 #include "gmc/gmc_7_0_sh_mask.h"
49 #include "oss/oss_2_0_d.h"
50 #include "oss/oss_2_0_sh_mask.h"
#define GFX7_NUM_GFX_RINGS     1
#define GFX7_NUM_COMPUTE_RINGS 8
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
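
/* Editor's annotation (not upstream documentation): each entry in the RLC
 * save/restore lists below packs two values into one u32. The upper 16 bits
 * select the GRBM_GFX_INDEX instance the register lives in (0x0e00 appears
 * to be the broadcast/default select, 0x4e00..0xbe00 per-instance copies,
 * 0x0000/0x0001 per-SE variants), and the lower 16 bits are the register's
 * dword offset (byte offset >> 2). */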
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc178 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x8e00 << 16) | (0xcd20 >> 2),
	(0x9e00 << 16) | (0xcd20 >> 2),
	(0xae00 << 16) | (0xcd20 >> 2),
	(0xbe00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc278 >> 2),
	(0x0e00 << 16) | (0xc27c >> 2),
	(0x0e00 << 16) | (0xc280 >> 2),
	(0x0e00 << 16) | (0xc284 >> 2),
	(0x0e00 << 16) | (0xc288 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc29c >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0xc2b0 >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x8e00 << 16) | (0xc900 >> 2),
	(0x9e00 << 16) | (0xc900 >> 2),
	(0xae00 << 16) | (0xc900 >> 2),
	(0xbe00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x8e00 << 16) | (0xc904 >> 2),
	(0x9e00 << 16) | (0xc904 >> 2),
	(0xae00 << 16) | (0xc904 >> 2),
	(0xbe00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x8e00 << 16) | (0xc908 >> 2),
	(0x9e00 << 16) | (0xc908 >> 2),
	(0xae00 << 16) | (0xc908 >> 2),
	(0xbe00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x8e00 << 16) | (0xc90c >> 2),
	(0x9e00 << 16) | (0xc90c >> 2),
	(0xae00 << 16) | (0xc90c >> 2),
	(0xbe00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x8e00 << 16) | (0xc910 >> 2),
	(0x9e00 << 16) | (0xc910 >> 2),
	(0xae00 << 16) | (0xc910 >> 2),
	(0xbe00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0001 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0001 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0001 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0001 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc778 >> 2),
	(0x0400 << 16) | (0xc77c >> 2),
	(0x0400 << 16) | (0xc780 >> 2),
	(0x0400 << 16) | (0xc784 >> 2),
	(0x0400 << 16) | (0xc788 >> 2),
	(0x0400 << 16) | (0xc78c >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0400 << 16) | (0xc7a0 >> 2),
	(0x0400 << 16) | (0xc7a4 >> 2),
	(0x0400 << 16) | (0xc7a8 >> 2),
	(0x0400 << 16) | (0xc7ac >> 2),
	(0x0400 << 16) | (0xc7b0 >> 2),
	(0x0400 << 16) | (0xc7b4 >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x92a8 >> 2),
	(0x0e00 << 16) | (0x92ac >> 2),
	(0x0e00 << 16) | (0x92b4 >> 2),
	(0x0e00 << 16) | (0x92b8 >> 2),
	(0x0e00 << 16) | (0x92bc >> 2),
	(0x0e00 << 16) | (0x92c0 >> 2),
	(0x0e00 << 16) | (0x92c4 >> 2),
	(0x0e00 << 16) | (0x92c8 >> 2),
	(0x0e00 << 16) | (0x92cc >> 2),
	(0x0e00 << 16) | (0x92d0 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x8e00 << 16) | (0x31068 >> 2),
	(0x9e00 << 16) | (0x31068 >> 2),
	(0xae00 << 16) | (0x31068 >> 2),
	(0xbe00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3e1fc >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
		err = reject_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
	if (err) {
		printk(KERN_ERR
		       "gfx7: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
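
/* Editor's note: reject_firmware() and the "(DEBLOBBED)" name strings above
 * are the linux-libre substitutions for request_firmware() and the real
 * ucode file names (e.g. radeon/bonaire_pfp.bin); the surrounding
 * load/validate/release flow matches the upstream driver. */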
/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes. Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_HAWAII:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	}
}
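
/* Editor's note: besides being written to the GB_TILE_MODE0..31 and
 * GB_MACROTILE_MODE0..15 registers, each word above is cached in
 * adev->gfx.config.tile_mode_array[] / macrotile_mode_array[] so it can be
 * reported to userspace; addressing libraries then reuse the exact tiling
 * parameters instead of re-deriving them. */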
/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH. 0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
{
	u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
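
/* Usage sketch (editor's illustration; callers such as gfx_v7_0_setup_rb
 * below hold adev->grbm_idx_mutex around this pattern):
 *
 *	gfx_v7_0_select_se_sh(adev, 1, 0);      // aim at SE1/SH0
 *	data = RREG32(mmCC_RB_BACKEND_DISABLE); // read that instance's copy
 *	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); // restore broadcast
 */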
/**
 * gfx_v7_0_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * create a variable length bit mask (CIK).
 * Returns the bitmask.
 */
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
	u32 i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}
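
/* Example: gfx_v7_0_create_bitmask(4) == 0xf, i.e. one set bit per RB when
 * called with max_rb_num_per_se / sh_per_se below. */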
/**
 * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
 *
 * @adev: amdgpu_device pointer
 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
 */
static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
				    u32 max_rb_num_per_se,
				    u32 sh_per_se)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	else
		data = 0;

	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);

	return data & mask;
}
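
/* Editor's note: CC_RB_BACKEND_DISABLE reflects the factory-fused
 * (harvested) RBs while GC_USER_RB_BACKEND_DISABLE holds software-requested
 * disables; OR-ing them yields every RB that must not be used. */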
/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
			      u32 se_num, u32 sh_per_se,
			      u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j);
			data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
			if (adev->asic_type == CHIP_HAWAII)
				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
			else
				disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	adev->gfx.config.backend_enable_mask = enabled_rbs;
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < se_num; i++) {
		gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 0:
				if (j == 0)
					data |= (RASTER_CONFIG_RB_MAP_3 <<
						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
				else
					data |= (RASTER_CONFIG_RB_MAP_0 <<
						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
				break;
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(mmPA_SC_RASTER_CONFIG, data);
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * gmc_v7_0_init_compute_vmid - init the compute vmids
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
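
/* Editor's sketch of the address math: SH_MEM_BASES packs two 16-bit bases
 * (private aperture in the low half, shared in the high half), each taken
 * as bits 63:48 of the FSA64 address, so DEFAULT_SH_MEM_BASES = 0x6000
 * places the apertures at 0x6000'0000'0000'0000 as described above. */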
/**
 * gfx_v7_0_gpu_init - setup the 3D engine
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 sh_mem_cfg;
	u32 tmp;
	int i;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 7;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAWAII:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 11;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}
	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
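
	/* Worked example (editor's annotation): NOOFCOLS encodes 2^(8 + n)
	 * columns, so tmp == 0 gives 256 columns -> 4 * 256 / 1024 = 1 KB
	 * rows, while tmp == 2 gives 1024 columns -> 4 KB rows, which the
	 * clamp above caps at 4. */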
	/* XXX use MC settings? */
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
2160 switch (adev->gfx.config.mem_row_size_in_kb) {
2163 gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
2166 gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
2169 gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
2172 adev->gfx.config.gb_addr_config = gb_addr_config;
2174 WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
2175 WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
2176 WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
2177 WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
2178 WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
2179 WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
2180 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2181 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
2183 gfx_v7_0_tiling_mode_table_init(adev);
2185 gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
2186 adev->gfx.config.max_sh_per_se,
2187 adev->gfx.config.max_backends_per_se);
	/* set HW defaults for 3D engine */
	WREG32(mmCP_MEQ_THRESHOLDS,
	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcast
	 * to all the shaders
	 */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, 0);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gmc_v7_0_init_compute_vmid(adev);

	WREG32(mmSX_DEBUG_1, 0x20);

	WREG32(mmTA_CNTL_AUX, 0x00010000);

	tmp = RREG32(mmSPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(mmSPI_CONFIG_CNTL, tmp);

	WREG32(mmSQ_CONFIG, 1);

	WREG32(mmDB_DEBUG, 0);

	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(mmDB_DEBUG2, tmp);

	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(mmDB_DEBUG3, tmp);

	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(mmCB_HW_CONTROL, tmp);

	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

	WREG32(mmPA_SC_FIFO_SIZE,
	       ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

	WREG32(mmVGT_NUM_INSTANCES, 1);

	WREG32(mmCP_PERFMON_CNTL, 0);

	WREG32(mmSQ_CONFIG, 0);

	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
	       ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

	WREG32(mmVGT_CACHE_INVALIDATION,
	       (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
	       (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	udelay(50);
}

/*
 * GPU scratch registers helpers function.
 */
/**
 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
 * is not used by default on newer asics (r6xx+).  On newer asics,
 * memory buffers are used for fences rather than scratch regs.
 */
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		adev->gfx.scratch.free[i] = true;
		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
	}
}

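/*
 * Example (illustrative only): with reg_base = mmSCRATCH_REG0 the pool
 * covers SCRATCH_REG0..SCRATCH_REG6; amdgpu_gfx_scratch_get() hands out
 * the first entry still marked free, and the ring/IB tests below return
 * it via amdgpu_gfx_scratch_free() when they are done.
 */
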
/**
 * gfx_v7_0_ring_test_ring - basic gfx ring test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume();
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_lock(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

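/*
 * For reference, the three dwords committed in the test above form one
 * PM4 packet.  A sketch of the encoding (assuming the usual PACKET3()
 * layout of (3 << 30) | (count << 16) | (opcode << 8)):
 *
 *   0xC0017900                                SET_UCONFIG_REG, one reg/value pair
 *   scratch - PACKET3_SET_UCONFIG_REG_START   register offset
 *   0xDEADBEEF                                value the CP writes back
 *
 * The test then simply polls the scratch register until the CP has
 * consumed the packet.
 */
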
/**
 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Emits an hdp flush on the cp.
 */
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask;
	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}

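/*
 * Note on the WAIT_REG_MEM emitted above: OPERATION(1) selects the
 * "write, wait, write" mode, so the CP writes ref_and_mask to
 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE every 0x20 clocks
 * until the requested bits come back, i.e. until the HDP flush this
 * ring asked for has actually completed.
 */
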
/**
 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
 *
 * @ring: amdgpu ring buffer object
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* controlling the write size and interrupt
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* Workaround for cache flush problems.  First send a dummy EOP
	 * event down the pipe with the sequence number one below the real one.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe. */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

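/*
 * DATA_SEL/INT_SEL recap (per the EVENT_WRITE_EOP usage above):
 * DATA_SEL(1) writes the 32-bit seq to 'addr', DATA_SEL(2) writes the
 * full 64-bit value, and INT_SEL(2) raises a fence interrupt after the
 * write.  The dummy EOP uses DATA_SEL(1)/INT_SEL(0) so it only flushes
 * caches and writes seq - 1 without signalling anyone.
 */
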
/**
 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
 *
 * @ring: amdgpu ring buffer object
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* controlling the write size and interrupt
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

/**
 * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
 *
 * @ring: amdgpu ring buffer object
 * @semaphore: amdgpu semaphore object
 * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
 */
static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);

	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
		/* Prevent the PFP from running ahead of the semaphore wait */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}

	return true;
}

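/*
 * Usage sketch (illustrative): to order ring B after ring A, emit a
 * signal on A and a wait on B against the same semaphore->gpu_addr; the
 * extra PFP_SYNC_ME on gfx waits keeps the prefetcher from racing past
 * the not-yet-satisfied semaphore.
 */
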
/*
 * IB stuff
 */
/**
 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: amdgpu indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib)
{
	bool need_ctx_switch = ring->current_ctx != ib->ctx;
	u32 header, control = 0;
	u32 next_rptr = ring->wptr + 5;

	/* drop the CE preamble IB for the same context */
	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
		return;

	if (need_ctx_switch)
		next_rptr += 2;

	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
	if (need_ctx_switch) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw |
		(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

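/*
 * Sketch of the INDIRECT_BUFFER control dword built above (field layout
 * assumed from the CIK PM4 format): the low bits carry ib->length_dw
 * and bits 31:24 carry the VM id, so a 64-dword IB scheduled for VM 3
 * would yield control = 64 | (3 << 24).
 */
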
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib)
{
	u32 header, control = 0;
	u32 next_rptr = ring->wptr + 5;

	control |= INDIRECT_BUFFER_VALID;
	next_rptr += 4;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, next_rptr);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw |
		(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

/**
 * gfx_v7_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err2;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err2;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err2;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}

err2:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs.  The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
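/*
 * Queue capacity example: 2 MECs x 4 pipes x 8 queues gives KV its 64
 * compute queues; CI/KB have a single MEC and therefore 32.  See the
 * comment in gfx_v7_0_mec_init() below for how many of those this
 * driver actually uses.
 */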
/**
 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the gfx MEs.
 */
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_ME_CNTL, 0);
	} else {
		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	udelay(50);
}

/**
 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);

	gfx_v7_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}

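/*
 * All three loads above follow the same pattern; a minimal sketch,
 * assuming a generic (addr, data) register pair:
 *
 *   WREG32(addr_reg, 0);                            start at word 0
 *   for (i = 0; i < fw_size; i++)
 *           WREG32(data_reg, le32_to_cpup(fw_data++));
 *   WREG32(addr_reg, fw_version);                   report loaded version
 */
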
/**
 * gfx_v7_0_cp_gfx_start - start the gfx ring
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v7_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_ring_write(ring, 0x16000012);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_KAVERI:
		amdgpu_ring_write(ring, 0x00000000); /* XXX */
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		amdgpu_ring_write(ring, 0x00000000); /* XXX */
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_HAWAII:
		amdgpu_ring_write(ring, 0x3a00161a);
		amdgpu_ring_write(ring, 0x0000002e);
		break;
	default:
		amdgpu_ring_write(ring, 0x00000000);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, 0x00000316);
	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	amdgpu_ring_unlock_commit(ring);

	return 0;
}

/**
 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr;
	int r;

	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
	if (adev->asic_type != CHIP_HAWAII)
		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	WREG32(mmSCRATCH_ADDR, 0);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(mmSCRATCH_UMSK, 0);

	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	/* start the ring */
	gfx_v7_0_cp_gfx_start(adev);
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	return 0;
}

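/*
 * Ring sizing example (illustrative): for a 16 KiB ring,
 * order_base_2(16384 / 8) = 11, so the CP_RB0_CNTL size field is
 * programmed with 11 and the CP treats the buffer as 2^11 = 2048
 * qwords; the rptr block size comes from AMDGPU_GPU_PAGE_SIZE the
 * same way.
 */
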
static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return rptr;
}

static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	wptr = RREG32(mmCP_RB0_WPTR);

	return wptr;
}

static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmCP_RB0_WPTR, ring->wptr);
	(void)RREG32(mmCP_RB0_WPTR);
}

static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return rptr;
}

static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u32 wptr;

	/* XXX check if swapping is necessary on BE */
	wptr = ring->adev->wb.wb[ring->wptr_offs];

	return wptr;
}

static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	adev->wb.wb[ring->wptr_offs] = ring->wptr;
	WDOORBELL32(ring->doorbell_index, ring->wptr);
}

/**
 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the compute MEs.
 */
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
	}
	udelay(50);
}

/**
 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(
					mec_hdr->ucode_feature_version);

	gfx_v7_0_cp_compute_enable(adev, false);

	/* MEC1 */
	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);

	if (adev->asic_type == CHIP_KAVERI) {
		const struct gfx_firmware_header_v1_0 *mec2_hdr;

		if (!adev->gfx.mec2_fw)
			return -EINVAL;

		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version = le32_to_cpu(
						mec2_hdr->ucode_feature_version);

		/* MEC2 */
		fw_data = (const __le32 *)
			(adev->gfx.mec2_fw->data +
			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}

/**
 * gfx_v7_0_cp_compute_start - start the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the compute queues.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
{
	gfx_v7_0_cp_compute_enable(adev, true);

	return 0;
}

/**
 * gfx_v7_0_cp_compute_fini - stop the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj) {
			r = amdgpu_bo_reserve(ring->mqd_obj, false);
			if (unlikely(r != 0))
				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);

			amdgpu_bo_unpin(ring->mqd_obj);
			amdgpu_bo_unreserve(ring->mqd_obj);

			amdgpu_bo_unref(&ring->mqd_obj);
			ring->mqd_obj = NULL;
		}
	}
}

static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
}

#define MEC_HPD_SIZE 2048

static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;

	/*
	 * KV:    2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
	 * Nonetheless, we assign only 1 pipe because all other pipes will
	 * be handled by KFD
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}

	/* clear memory.  Not sure if this is required or not */
	memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}

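/*
 * Sizing example: 1 MEC * 1 pipe * MEC_HPD_SIZE(2048) * 2 = 4 KiB of
 * GTT is allocated above, and gfx_v7_0_cp_compute_resume() encodes each
 * buffer as order_base_2(MEC_HPD_SIZE / 8) = 8, i.e. 2^(8+1) = 512
 * dwords = 2 KiB per pipe.
 */
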
struct hqd_registers
{
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};

struct bonaire_mqd
{
	u32 header;
	u32 dispatch_initiator;
	u32 dimensions[3];
	u32 start_idx[3];
	u32 num_threads[3];
	u32 pipeline_stat_enable;
	u32 perf_counter_enable;
	u32 pgm[2];
	u32 tba[2];
	u32 tma[2];
	u32 pgm_rsrc[2];
	u32 vmid;
	u32 resource_limits;
	u32 static_thread_mgmt01[2];
	u32 tmp_ring_size;
	u32 static_thread_mgmt23[2];
	u32 restart[3];
	u32 thread_trace_enable;
	u32 reserved1;
	u32 user_data[16];
	u32 vgtcs_invoke_count[2];
	struct hqd_registers queue_state;
	u32 dequeue_cntr;
	u32 interrupt_queue[64];
};

/**
 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
{
	int r, i, j;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct bonaire_mqd *mqd;

	r = gfx_v7_0_cp_compute_start(adev);
	if (r)
		return r;

	/* fix up chicken bits */
	tmp = RREG32(mmCP_CPF_DEBUG);
	tmp |= (1 << 23);
	WREG32(mmCP_CPF_DEBUG, tmp);

	/* init the pipes */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
		int me = (i < 4) ? 1 : 2;
		int pipe = (i < 4) ? i : (i - 4);

		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);

		cik_srbm_select(adev, me, pipe, 0, 0);

		/* write the EOP addr */
		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);

		/* set the VMID assigned */
		WREG32(mmCP_HPD_EOP_VMID, 0);

		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
		tmp |= order_base_2(MEC_HPD_SIZE / 8);
		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

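	/*
	 * Pipe indexing note: i counts pipes linearly across MECs, so with
	 * 4 pipes per MEC, i = 0..3 maps to ME1 pipes 0..3 and i = 4..7 to
	 * ME2 pipes 0..3 - hence me = (i < 4) ? 1 : 2 and pipe = i - 4 in
	 * the loop above.
	 */
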
	/* init the queues.  Just two for now. */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		if (ring->mqd_obj == NULL) {
			r = amdgpu_bo_create(adev,
					     sizeof(struct bonaire_mqd),
					     PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
					     &ring->mqd_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &mqd_gpu_addr);
		if (r) {
			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
		if (r) {
			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}

		/* init the mqd struct */
		memset(buf, 0, sizeof(struct bonaire_mqd));

		mqd = (struct bonaire_mqd *)buf;
		mqd->header = 0xC0310800;
		mqd->static_thread_mgmt01[0] = 0xffffffff;
		mqd->static_thread_mgmt01[1] = 0xffffffff;
		mqd->static_thread_mgmt23[0] = 0xffffffff;
		mqd->static_thread_mgmt23[1] = 0xffffffff;

		mutex_lock(&adev->srbm_mutex);
		cik_srbm_select(adev, ring->me,
				ring->pipe,
				ring->queue, 0);

		/* disable wptr polling */
		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);

		/* enable doorbell? */
		mqd->queue_state.cp_hqd_pq_doorbell_control =
			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		if (use_doorbell)
			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		else
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* disable the queue if it's active */
		mqd->queue_state.cp_hqd_dequeue_request = 0;
		mqd->queue_state.cp_hqd_pq_rptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = 0;
		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
					break;
				udelay(1);
			}
			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		}

		/* set the pointer to the MQD */
		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
		/* set MQD vmid to 0 */
		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);

		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
		hqd_gpu_addr = ring->gpu_addr >> 8;
		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);

		/* set up the HQD, this is similar to CP_RB0_CNTL */
		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
			  CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);

		mqd->queue_state.cp_hqd_pq_control |=
			order_base_2(ring->ring_size / 8);
		mqd->queue_state.cp_hqd_pq_control |=
			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
		mqd->queue_state.cp_hqd_pq_control |=
			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
#endif
		mqd->queue_state.cp_hqd_pq_control &=
			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
			  CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
			  CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
		mqd->queue_state.cp_hqd_pq_control |=
			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);

		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);

		/* set the wb address whether it's enabled or not */
		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
			upper_32_bits(wb_gpu_addr) & 0xffff;
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);

		/* enable the doorbell if requested */
		if (use_doorbell) {
			mqd->queue_state.cp_hqd_pq_doorbell_control =
				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				(ring->doorbell_index <<
				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
				  CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
		} else {
			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
		}
		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
		       mqd->queue_state.cp_hqd_pq_doorbell_control);

		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
		ring->wptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

		/* set the vmid for the queue */
		mqd->queue_state.cp_hqd_vmid = 0;
		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);

		/* activate the queue */
		mqd->queue_state.cp_hqd_active = 1;
		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);

		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		amdgpu_bo_kunmap(ring->mqd_obj);
		amdgpu_bo_unreserve(ring->mqd_obj);

		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	return 0;
}

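/*
 * Doorbell note: with use_doorbell set, the HQD picks up new work when
 * ring->wptr is written to the ring's doorbell page (see
 * gfx_v7_0_ring_set_wptr_compute() above) rather than via an MMIO
 * register write, which is what makes user-mode queue submission
 * possible on these pipes.
 */
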
static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_cp_gfx_enable(adev, enable);
	gfx_v7_0_cp_compute_enable(adev, enable);
}

static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
{
	int r;

	r = gfx_v7_0_cp_gfx_load_microcode(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_load_microcode(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	else
		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v7_0_cp_load_microcode(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_gfx_resume(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_resume(adev);
	if (r)
		return r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

/*
 * vm
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
 *
 * @ring: amdgpu_ring structure holding ring information
 * @vm_id: the VM id to flush
 * @pd_addr: the page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
 */
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */

	if (usepfp) {
		/* sync CE with ME to prevent CE fetching CEIB before context switch is done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	if (vm_id < 8) {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring,
				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);

		/* sync CE with ME to prevent CE fetching CEIB before context switch is done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/*
 * RLC
 * The RLC is a multi-purpose microengine that handles a
 * variety of functions.
 */
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
	int r;

	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
		adev->gfx.rlc.save_restore_obj = NULL;
	}

	/* clear state block */
	if (adev->gfx.rlc.clear_state_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
		adev->gfx.rlc.clear_state_obj = NULL;
	}

	/* cp table block */
	if (adev->gfx.rlc.cp_table_obj) {
		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
		adev->gfx.rlc.cp_table_obj = NULL;
	}
}

static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws, i;
	const struct cs_section_def *cs_data;
	int r;

	/* allocate rlc buffers */
	if (adev->flags & AMD_IS_APU) {
		if (adev->asic_type == CHIP_KAVERI) {
			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
		} else {
			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
		}
	}
	adev->gfx.rlc.cs_data = ci_cs_data;
	adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;

	src_ptr = adev->gfx.rlc.reg_list;
	dws = adev->gfx.rlc.reg_list_size;
	dws += (5 * 16) + 48 + 48 + 64;

	cs_data = adev->gfx.rlc.cs_data;

	if (src_ptr) {
		/* save restore block */
		if (adev->gfx.rlc.save_restore_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.save_restore_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.save_restore_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

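	/*
	 * Sizing example (illustrative): for KV, dws is
	 * ARRAY_SIZE(spectre_rlc_save_restore_register_list) plus the
	 * (5 * 16) + 48 + 48 + 64 dwords of extra state the RLC appends,
	 * and the save/restore bo is dws * 4 bytes of VRAM.
	 */
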
	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);

		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.clear_state_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
		if (unlikely(r != 0)) {
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.clear_state_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->gfx.rlc.cp_table_size) {
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
					     NULL, NULL,
					     &adev->gfx.rlc.cp_table_obj);
			if (r) {
				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
				gfx_v7_0_rlc_fini(adev);
				return r;
			}
		}

		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
		if (unlikely(r != 0)) {
			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
				  &adev->gfx.rlc.cp_table_gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		gfx_v7_0_init_cp_pg_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}

static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	tmp = RREG32(mmRLC_LB_CNTL);
	if (enable)
		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	else
		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	WREG32(mmRLC_LB_CNTL, tmp);
}

static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(mmRLC_CNTL);
	if (tmp != rlc)
		WREG32(mmRLC_CNTL, rlc);
}

static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_CNTL);

	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		u32 i;

		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
		WREG32(mmRLC_CNTL, data);

		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
				break;
			udelay(1);
		}

		gfx_v7_0_wait_for_rlc_serdes(adev);
	}

	return orig;
}

void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp, i, mask;

	tmp = 0x1 | (1 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);

	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
			break;
		udelay(1);
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
			break;
		udelay(1);
	}
}

void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = 0x1 | (0 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);
}

/**
 * gfx_v7_0_rlc_stop - stop the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK).
 */
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, 0);

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	gfx_v7_0_wait_for_rlc_serdes(adev);
}

/**
 * gfx_v7_0_rlc_start - start the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Unhalt the RLC ME (MicroEngine) (CIK).
 */
static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmGRBM_SOFT_RESET);

	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
}

/**
 * gfx_v7_0_rlc_resume - setup the RLC hw
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);
	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(
					hdr->ucode_feature_version);

	gfx_v7_0_rlc_stop(adev);

	/* disable CG */
	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);

	gfx_v7_0_rlc_reset(adev);

	gfx_v7_0_init_pg(adev);

	WREG32(mmRLC_LB_CNTR_INIT, 0);
	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);

	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(mmRLC_LB_PARAMS, 0x00600408);
	WREG32(mmRLC_LB_CNTL, 0x80000004);
	mutex_unlock(&adev->grbm_idx_mutex);

	WREG32(mmRLC_MC_CNTL, 0);
	WREG32(mmRLC_UCODE_CNTL, 0);

	fw_data = (const __le32 *)
		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	/* XXX - find out what chips support lbpw */
	gfx_v7_0_enable_lbpw(adev, false);

	if (adev->asic_type == CHIP_BONAIRE)
		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);

	gfx_v7_0_rlc_start(adev);

	return 0;
}

static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
	} else {
		gfx_v7_0_enable_gui_idle_interrupt(adev, false);

		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
	}

	if (orig != data)
		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}

static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
        u32 data, orig, tmp = 0;

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
                if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
                        if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
                                orig = data = RREG32(mmCP_MEM_SLP_CNTL);
                                data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
                                if (orig != data)
                                        WREG32(mmCP_MEM_SLP_CNTL, data);
                        }
                }

                orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
                data &= 0xfffffffd;     /* clear the MGCG override bit (bit 1) */
                if (orig != data)
                        WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

                tmp = gfx_v7_0_halt_rlc(adev);

                mutex_lock(&adev->grbm_idx_mutex);
                gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
                        RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
                WREG32(mmRLC_SERDES_WR_CTRL, data);
                mutex_unlock(&adev->grbm_idx_mutex);

                gfx_v7_0_update_rlc(adev, tmp);

                if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
                        orig = data = RREG32(mmCGTS_SM_CTRL_REG);
                        data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
                        data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
                        data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
                        data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
                        if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
                            (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
                                data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
                        data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
                        data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
                        data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
                        if (orig != data)
                                WREG32(mmCGTS_SM_CTRL_REG, data);
                }
        } else {
                orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
                data |= 0x00000002;     /* set the MGCG override bit (bit 1) */
                if (orig != data)
                        WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

                data = RREG32(mmRLC_MEM_SLP_CNTL);
                if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
                        data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
                        WREG32(mmRLC_MEM_SLP_CNTL, data);
                }

                data = RREG32(mmCP_MEM_SLP_CNTL);
                if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
                        data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
                        WREG32(mmCP_MEM_SLP_CNTL, data);
                }

                orig = data = RREG32(mmCGTS_SM_CTRL_REG);
                data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
                if (orig != data)
                        WREG32(mmCGTS_SM_CTRL_REG, data);

                tmp = gfx_v7_0_halt_rlc(adev);

                mutex_lock(&adev->grbm_idx_mutex);
                gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
                WREG32(mmRLC_SERDES_WR_CTRL, data);
                mutex_unlock(&adev->grbm_idx_mutex);

                gfx_v7_0_update_rlc(adev, tmp);
        }
}

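/*
 * Toggle coarse and medium grain clock gating together, with the GUI
 * idle interrupt disabled around the sequence.  Note the mirrored
 * ordering: MGCG before CGCG on enable, CGCG before MGCG on disable.
 */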
static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
                               bool enable)
{
        gfx_v7_0_enable_gui_idle_interrupt(adev, false);
        /* order matters! */
        if (enable) {
                gfx_v7_0_enable_mgcg(adev, true);
                gfx_v7_0_enable_cgcg(adev, true);
        } else {
                gfx_v7_0_enable_cgcg(adev, false);
                gfx_v7_0_enable_mgcg(adev, false);
        }
        gfx_v7_0_enable_gui_idle_interrupt(adev, true);
}

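/*
 * The next two helpers let the SMU slow the shader clock down while
 * power is being ramped up or down; both require RLC/SMU handshake
 * support in pg_flags.
 */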
static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
                                                bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
                data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
        else
                data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
                                                bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
                data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
        else
                data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
                data &= ~0x8000;        /* clear the CP PG disable bit */
        else
                data |= 0x8000;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
                data &= ~0x2000;        /* clear the GDS PG disable bit */
        else
                data |= 0x2000;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

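/*
 * Copy the jump tables of the CE, PFP, ME and MEC microcode images
 * (plus MEC2 on Kaveri) into the CP power gating table buffer so the
 * RLC can reinitialize the CP when it powers the block back up.
 */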
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
{
        const __le32 *fw_data;
        volatile u32 *dst_ptr;
        int me, i, max_me = 4;
        u32 bo_offset = 0;
        u32 table_offset, table_size;

        /* Kaveri has a second MEC, so its jump table is copied as well */
        if (adev->asic_type == CHIP_KAVERI)
                max_me = 5;

        if (adev->gfx.rlc.cp_table_ptr == NULL)
                return;

        /* write the cp table buffer */
        dst_ptr = adev->gfx.rlc.cp_table_ptr;
        for (me = 0; me < max_me; me++) {
                if (me == 0) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.ce_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 1) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.pfp_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 2) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.me_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 3) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec2_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                }

                for (i = 0; i < table_size; i++) {
                        dst_ptr[bo_offset + i] =
                                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
                }

                bo_offset += table_size;
        }
}

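/*
 * Coarse grain power gating for the GFX block: toggles the RLC's GFX
 * power gating enable together with its automatic power gating control.
 */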
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 data, orig;

        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
                orig = data = RREG32(mmRLC_PG_CNTL);
                data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
                if (orig != data)
                        WREG32(mmRLC_PG_CNTL, data);

                orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
                data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
                if (orig != data)
                        WREG32(mmRLC_AUTO_PG_CTRL, data);
        } else {
                orig = data = RREG32(mmRLC_PG_CNTL);
                data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
                if (orig != data)
                        WREG32(mmRLC_PG_CNTL, data);

                orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
                data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
                if (orig != data)
                        WREG32(mmRLC_AUTO_PG_CTRL, data);

                /* dummy read (the value is not used) */
                data = RREG32(mmDB_RENDER_CONTROL);
        }
}

static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev,
                                         u32 se, u32 sh)
{
        u32 mask = 0, tmp, tmp1;
        int i;

        gfx_v7_0_select_se_sh(adev, se, sh);
        tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
        tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

        tmp &= 0xffff0000;

        tmp |= tmp1;
        tmp >>= 16;

        for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
                mask <<= 1;
                mask |= 1;
        }

        return (~tmp) & mask;
}

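/*
 * Derive the always-on CU mask and the active CU count via
 * gfx_v7_0_get_cu_info() and hand both to the RLC.
 */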
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
        uint32_t tmp, active_cu_number;
        struct amdgpu_cu_info cu_info;

        gfx_v7_0_get_cu_info(adev, &cu_info);
        tmp = cu_info.ao_cu_mask;
        active_cu_number = cu_info.number;

        WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);

        tmp = RREG32(mmRLC_MAX_PG_CU);
        tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
        tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
        WREG32(mmRLC_MAX_PG_CU, tmp);
}

static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
                                            bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
                data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
        else
                data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
                                             bool enable)
{
        u32 data, orig;

        orig = data = RREG32(mmRLC_PG_CNTL);
        if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
                data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
        else
                data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);
}

#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D

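/*
 * Point the RLC scratch area at the clear state and register list
 * buffers, program the save/restore and jump table bases, and set up
 * the power gating delays and idle threshold used by auto power gating.
 */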
static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
        u32 data, orig;
        u32 i;

        if (adev->gfx.rlc.cs_data) {
                WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
                WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
                WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
                WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
        } else {
                WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
                for (i = 0; i < 3; i++)
                        WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
        }
        if (adev->gfx.rlc.reg_list) {
                WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
                for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
                        WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
        }

        orig = data = RREG32(mmRLC_PG_CNTL);
        data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
        if (orig != data)
                WREG32(mmRLC_PG_CNTL, data);

        WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
        WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);

        data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
        data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
        data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
        WREG32(mmCP_RB_WPTR_POLL_CNTL, data);

        data = 0x10101010;
        WREG32(mmRLC_PG_DELAY, data);

        data = RREG32(mmRLC_PG_DELAY_2);
        data &= ~0xff;
        data |= 0x3;
        WREG32(mmRLC_PG_DELAY_2, data);

        data = RREG32(mmRLC_AUTO_PG_CTRL);
        data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
        data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
        WREG32(mmRLC_AUTO_PG_CTRL, data);
}

static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{
        gfx_v7_0_enable_gfx_cgpg(adev, enable);
        gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
        gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
}

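/*
 * The clear state buffer (CSB) is an indirect buffer of PACKET3
 * commands that restores a known context state.  gfx_v7_0_get_csb_size()
 * computes the number of dwords required and gfx_v7_0_get_csb_buffer()
 * emits the matching packets, so the two must stay in sync.
 */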
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return 0;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }
        /* pa_sc_raster_config/pa_sc_raster_config1 */
        count += 4;
        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
                                    volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                buffer[count++] = cpu_to_le32(0x16000012);
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        case CHIP_KAVERI:
                buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        case CHIP_HAWAII:
                buffer[count++] = cpu_to_le32(0x3a00161a);
                buffer[count++] = cpu_to_le32(0x0000002e);
                break;
        default:
                buffer[count++] = cpu_to_le32(0x00000000);
                buffer[count++] = cpu_to_le32(0x00000000);
                break;
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}

static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
        if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
                              AMDGPU_PG_SUPPORT_GFX_SMG |
                              AMDGPU_PG_SUPPORT_GFX_DMG |
                              AMDGPU_PG_SUPPORT_CP |
                              AMDGPU_PG_SUPPORT_GDS |
                              AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
                gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
                gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
                if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
                        gfx_v7_0_init_gfx_cgpg(adev);
                        gfx_v7_0_enable_cp_pg(adev, true);
                        gfx_v7_0_enable_gds_pg(adev, true);
                }
                gfx_v7_0_init_ao_cu_mask(adev);
                gfx_v7_0_update_gfx_pg(adev, true);
        }
}

static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
        if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
                              AMDGPU_PG_SUPPORT_GFX_SMG |
                              AMDGPU_PG_SUPPORT_GFX_DMG |
                              AMDGPU_PG_SUPPORT_CP |
                              AMDGPU_PG_SUPPORT_GDS |
                              AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
                gfx_v7_0_update_gfx_pg(adev, false);
                if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
                        gfx_v7_0_enable_cp_pg(adev, false);
                        gfx_v7_0_enable_gds_pg(adev, false);
                }
        }
}

/**
 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
        uint64_t clock;

        mutex_lock(&adev->gfx.gpu_clock_mutex);
        WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
        clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
                ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
        mutex_unlock(&adev->gfx.gpu_clock_mutex);
        return clock;
}

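/**
 * gfx_v7_0_ring_emit_gds_switch - emit the per-VMID GDS setup on a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VMID to program
 * @gds_base: GDS memory base for this VMID
 * @gds_size: GDS memory size for this VMID
 * @gws_base: first GWS slot for this VMID
 * @gws_size: number of GWS slots for this VMID
 * @oa_base: first OA unit for this VMID
 * @oa_size: number of OA units for this VMID
 *
 * Emits WRITE_DATA packets that update the VMID's GDS, GWS and OA
 * registers; the OA value is written as a contiguous bitmask of
 * oa_size bits starting at bit oa_base.
 */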
static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
                                          uint32_t vmid,
                                          uint32_t gds_base, uint32_t gds_size,
                                          uint32_t gws_base, uint32_t gws_size,
                                          uint32_t oa_base, uint32_t oa_size)
{
        gds_base = gds_base >> AMDGPU_GDS_SHIFT;
        gds_size = gds_size >> AMDGPU_GDS_SHIFT;

        gws_base = gws_base >> AMDGPU_GWS_SHIFT;
        gws_size = gws_size >> AMDGPU_GWS_SHIFT;

        oa_base = oa_base >> AMDGPU_OA_SHIFT;
        oa_size = oa_size >> AMDGPU_OA_SHIFT;

        /* GDS Base */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, gds_base);

        /* GDS Size */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, gds_size);

        /* GWS */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

        /* OA */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(0)));
        amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v7_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
        gfx_v7_0_set_ring_funcs(adev);
        gfx_v7_0_set_irq_funcs(adev);
        gfx_v7_0_set_gds_init(adev);

        return 0;
}

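/*
 * sw_init registers the CP interrupt sources (EOP, privileged register
 * and privileged instruction faults), loads the microcode, allocates
 * the RLC and MEC buffers, creates the gfx and compute rings, and
 * reserves the GDS/GWS/OA partitions used by the gfx ring.
 */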
static int gfx_v7_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, r;

        /* EOP Event */
        r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
        if (r)
                return r;

        /* Privileged reg */
        r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
        if (r)
                return r;

        /* Privileged inst */
        r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
        if (r)
                return r;

        gfx_v7_0_scratch_init(adev);

        r = gfx_v7_0_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load gfx firmware!\n");
                return r;
        }

        r = gfx_v7_0_rlc_init(adev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
                return r;
        }

        /* allocate mec buffers */
        r = gfx_v7_0_mec_init(adev);
        if (r) {
                DRM_ERROR("Failed to init MEC BOs!\n");
                return r;
        }

        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                ring = &adev->gfx.gfx_ring[i];
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024 * 1024,
                                     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
                                     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
                                     AMDGPU_RING_TYPE_GFX);
                if (r)
                        return r;
        }

        /* set up the compute queues */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                unsigned irq_type;

                /* max 32 queues per MEC */
                if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
                        DRM_ERROR("Too many (%d) compute rings!\n", i);
                        break;
                }
                ring = &adev->gfx.compute_ring[i];
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
                ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
                ring->me = 1; /* first MEC */
                ring->pipe = i / 8;
                ring->queue = i % 8;
                sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                /* type-2 packets are deprecated on MEC, use type-3 instead */
                r = amdgpu_ring_init(adev, ring, 1024 * 1024,
                                     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
                                     &adev->gfx.eop_irq, irq_type,
                                     AMDGPU_RING_TYPE_COMPUTE);
                if (r)
                        return r;
        }

        /* reserve GDS, GWS and OA resource for gfx */
        r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
                             PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_GDS, 0,
                             NULL, NULL, &adev->gds.gds_gfx_bo);
        if (r)
                return r;

        r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
                             PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_GWS, 0,
                             NULL, NULL, &adev->gds.gws_gfx_bo);
        if (r)
                return r;

        r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
                             PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_OA, 0,
                             NULL, NULL, &adev->gds.oa_gfx_bo);
        if (r)
                return r;

        return 0;
}

static int gfx_v7_0_sw_fini(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
        amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
        amdgpu_bo_unref(&adev->gds.gds_gfx_bo);

        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

        gfx_v7_0_cp_compute_fini(adev);
        gfx_v7_0_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);

        return 0;
}

static int gfx_v7_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gfx_v7_0_gpu_init(adev);

        /* init rlc */
        r = gfx_v7_0_rlc_resume(adev);
        if (r)
                return r;

        r = gfx_v7_0_cp_resume(adev);
        if (r)
                return r;

        adev->gfx.ce_ram_size = 0x8000;

        return 0;
}

static int gfx_v7_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gfx_v7_0_cp_enable(adev, false);
        gfx_v7_0_rlc_stop(adev);
        gfx_v7_0_fini_pg(adev);

        return 0;
}

static int gfx_v7_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gfx_v7_0_hw_fini(adev);
}

static int gfx_v7_0_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gfx_v7_0_hw_init(adev);
}

static bool gfx_v7_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
                return false;
        else
                return true;
}

static int gfx_v7_0_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                /* read GRBM_STATUS */
                tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

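/*
 * Dump the GFX block state (GRBM/CP/RLC status, tiling setup, per-queue
 * HQD registers and per-VMID memory apertures) for debugging; called
 * before and after a soft reset.
 */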
static void gfx_v7_0_print_status(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "GFX 7.x registers\n");
        dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
                 RREG32(mmGRBM_STATUS));
        dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
                 RREG32(mmGRBM_STATUS2));
        dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
                 RREG32(mmGRBM_STATUS_SE0));
        dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
                 RREG32(mmGRBM_STATUS_SE1));
        dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
                 RREG32(mmGRBM_STATUS_SE2));
        dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
                 RREG32(mmGRBM_STATUS_SE3));
        dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
        dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT1));
        dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT2));
        dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT3));
        dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
                 RREG32(mmCP_CPF_BUSY_STAT));
        dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPF_STALLED_STAT1));
        dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
        dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
        dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPC_STALLED_STAT1));
        dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));

        for (i = 0; i < 32; i++) {
                dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
                         i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
        }
        for (i = 0; i < 16; i++) {
                dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
                         i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
        }
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                dev_info(adev->dev, " se: %d\n", i);
                gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
                dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
                         RREG32(mmPA_SC_RASTER_CONFIG));
                dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
                         RREG32(mmPA_SC_RASTER_CONFIG_1));
        }
        gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

        dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmGB_ADDR_CONFIG));
        dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmHDP_ADDR_CONFIG));
        dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
                 RREG32(mmDMIF_ADDR_CALC));
        dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n",
                 RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
        dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n",
                 RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
        dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));

        dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
                 RREG32(mmCP_MEQ_THRESHOLDS));
        dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
                 RREG32(mmSX_DEBUG_1));
        dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
                 RREG32(mmTA_CNTL_AUX));
        dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
                 RREG32(mmSPI_CONFIG_CNTL));
        dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
                 RREG32(mmSQ_CONFIG));
        dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
                 RREG32(mmDB_DEBUG));
        dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
                 RREG32(mmDB_DEBUG2));
        dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
                 RREG32(mmDB_DEBUG3));
        dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
                 RREG32(mmCB_HW_CONTROL));
        dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
                 RREG32(mmSPI_CONFIG_CNTL_1));
        dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
                 RREG32(mmPA_SC_FIFO_SIZE));
        dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
                 RREG32(mmVGT_NUM_INSTANCES));
        dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
                 RREG32(mmCP_PERFMON_CNTL));
        dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
                 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
        dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
                 RREG32(mmVGT_CACHE_INVALIDATION));
        dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
                 RREG32(mmVGT_GS_VERTEX_REUSE));
        dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
                 RREG32(mmPA_SC_LINE_STIPPLE_STATE));
        dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
                 RREG32(mmPA_CL_ENHANCE));
        dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
                 RREG32(mmPA_SC_ENHANCE));

        dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
                 RREG32(mmCP_ME_CNTL));
        dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
                 RREG32(mmCP_MAX_CONTEXT));
        dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
                 RREG32(mmCP_ENDIAN_SWAP));
        dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
                 RREG32(mmCP_DEVICE_ID));

        dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
                 RREG32(mmCP_SEM_WAIT_TIMER));
        if (adev->asic_type != CHIP_HAWAII)
                dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
                         RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));

        dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
                 RREG32(mmCP_RB_WPTR_DELAY));
        dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
                 RREG32(mmCP_RB_VMID));
        dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
                 RREG32(mmCP_RB0_CNTL));
        dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
                 RREG32(mmCP_RB0_WPTR));
        dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
                 RREG32(mmCP_RB0_RPTR_ADDR));
        dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
                 RREG32(mmCP_RB0_RPTR_ADDR_HI));
        dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
                 RREG32(mmCP_RB0_CNTL));
        dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
                 RREG32(mmCP_RB0_BASE));
        dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
                 RREG32(mmCP_RB0_BASE_HI));
        dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
                 RREG32(mmCP_MEC_CNTL));
        dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
                 RREG32(mmCP_CPF_DEBUG));

        dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
                 RREG32(mmSCRATCH_ADDR));
        dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
                 RREG32(mmSCRATCH_UMSK));

        /* dump the compute pipes */
        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
                int me = (i < 4) ? 1 : 2;
                int pipe = (i < 4) ? i : (i - 4);
                int queue;

                dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
                cik_srbm_select(adev, me, pipe, 0, 0);
                dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_BASE_ADDR));
                dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
                dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_VMID));
                dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_CONTROL));

                for (queue = 0; queue < 8; queue++) {
                        cik_srbm_select(adev, me, pipe, queue, 0);
                        dev_info(adev->dev, " queue: %d\n", queue);
                        dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
                                 RREG32(mmCP_PQ_WPTR_POLL_CNTL));
                        dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
                        dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
                                 RREG32(mmCP_HQD_ACTIVE));
                        dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
                                 RREG32(mmCP_HQD_DEQUEUE_REQUEST));
                        dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_RPTR));
                        dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_WPTR));
                        dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_BASE));
                        dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_BASE_HI));
                        dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_CONTROL));
                        dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
                        dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
                        dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
                        dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
                        dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
                        dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
                                 RREG32(mmCP_HQD_PQ_WPTR));
                        dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
                                 RREG32(mmCP_HQD_VMID));
                        dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
                                 RREG32(mmCP_MQD_BASE_ADDR));
                        dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
                                 RREG32(mmCP_MQD_BASE_ADDR_HI));
                        dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
                                 RREG32(mmCP_MQD_CONTROL));
                }
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
                 RREG32(mmCP_INT_CNTL_RING0));
        dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
                 RREG32(mmRLC_LB_CNTL));
        dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
                 RREG32(mmRLC_CNTL));
        dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
                 RREG32(mmRLC_CGCG_CGLS_CTRL));
        dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
                 RREG32(mmRLC_LB_CNTR_INIT));
        dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
                 RREG32(mmRLC_LB_CNTR_MAX));
        dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
                 RREG32(mmRLC_LB_INIT_CU_MASK));
        dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
                 RREG32(mmRLC_LB_PARAMS));
        dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
                 RREG32(mmRLC_LB_CNTL));
        dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
                 RREG32(mmRLC_MC_CNTL));
        dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
                 RREG32(mmRLC_UCODE_CNTL));

        if (adev->asic_type == CHIP_BONAIRE)
                dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
                         RREG32(mmRLC_DRIVER_CPDMA_STATUS));

        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < 16; i++) {
                cik_srbm_select(adev, 0, 0, 0, i);
                dev_info(adev->dev, " VM %d:\n", i);
                dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
                         RREG32(mmSH_MEM_CONFIG));
                dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
                         RREG32(mmSH_MEM_APE1_BASE));
                dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
                         RREG32(mmSH_MEM_APE1_LIMIT));
                dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
                         RREG32(mmSH_MEM_BASES));
        }
        cik_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
}

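/*
 * Check the GRBM/SRBM status registers for hung blocks and, if any are
 * busy, halt the CP and RLC and pulse the corresponding GRBM/SRBM soft
 * reset bits.
 */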
static int gfx_v7_0_soft_reset(void *handle)
{
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* GRBM_STATUS */
        tmp = RREG32(mmGRBM_STATUS);
        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
                grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
                        GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
                grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
        }

        /* GRBM_STATUS2 */
        tmp = RREG32(mmGRBM_STATUS2);
        if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
                grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

        /* SRBM_STATUS */
        tmp = RREG32(mmSRBM_STATUS);
        if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

        if (grbm_soft_reset || srbm_soft_reset) {
                gfx_v7_0_print_status((void *)adev);
                /* disable CG/PG */
                gfx_v7_0_fini_pg(adev);
                gfx_v7_0_update_cg(adev, false);

                /* stop the rlc */
                gfx_v7_0_rlc_stop(adev);

                /* Disable GFX parsing/prefetching */
                WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

                /* Disable MEC parsing/prefetching */
                WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

                if (grbm_soft_reset) {
                        tmp = RREG32(mmGRBM_SOFT_RESET);
                        tmp |= grbm_soft_reset;
                        dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                        WREG32(mmGRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmGRBM_SOFT_RESET);

                        udelay(50);

                        tmp &= ~grbm_soft_reset;
                        WREG32(mmGRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmGRBM_SOFT_RESET);
                }

                if (srbm_soft_reset) {
                        tmp = RREG32(mmSRBM_SOFT_RESET);
                        tmp |= srbm_soft_reset;
                        dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                        WREG32(mmSRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmSRBM_SOFT_RESET);

                        udelay(50);

                        tmp &= ~srbm_soft_reset;
                        WREG32(mmSRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmSRBM_SOFT_RESET);
                }
                /* Wait a little for things to settle down */
                udelay(50);
                gfx_v7_0_print_status((void *)adev);
        }
        return 0;
}

static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                                 enum amdgpu_interrupt_state state)
{
        u32 cp_int_cntl;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        default:
                break;
        }
}

static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
{
        u32 mec_int_cntl, mec_int_cntl_reg;

        /*
         * amdgpu controls only pipe 0 of MEC1. That's why this function only
         * handles the setting of interrupts for this specific pipe. All other
         * pipes' interrupts are set by amdkfd.
         */

        if (me == 1) {
                switch (pipe) {
                case 0:
                        mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
                        break;
                default:
                        DRM_DEBUG("invalid pipe %d\n", pipe);
                        return;
                }
        } else {
                DRM_DEBUG("invalid me %d\n", me);
                return;
        }

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                mec_int_cntl = RREG32(mec_int_cntl_reg);
                mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
                WREG32(mec_int_cntl_reg, mec_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                mec_int_cntl = RREG32(mec_int_cntl_reg);
                mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
                WREG32(mec_int_cntl_reg, mec_int_cntl);
                break;
        default:
                break;
        }
}

static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
{
        u32 cp_int_cntl;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        default:
                break;
        }

        return 0;
}

static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *src,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
{
        u32 cp_int_cntl;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
                cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
                WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
                break;
        default:
                break;
        }

        return 0;
}

static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
{
        switch (type) {
        case AMDGPU_CP_IRQ_GFX_EOP:
                gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
                gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
                break;
        default:
                break;
        }

        return 0;
}

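/*
 * EOP interrupt handler.  Bits [3:2] of the IV ring_id carry the ME
 * and bits [1:0] the pipe that signalled completion; ME 0 is the gfx
 * ring, MEs 1 and 2 are the compute MECs.
 */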
static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
{
        u8 me_id, pipe_id;
        struct amdgpu_ring *ring;
        int i;

        DRM_DEBUG("IH: CP EOP\n");
        me_id = (entry->ring_id & 0x0c) >> 2;
        pipe_id = (entry->ring_id & 0x03) >> 0;
        switch (me_id) {
        case 0:
                amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
                break;
        case 1:
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
                        if ((ring->me == me_id) && (ring->pipe == pipe_id))
                                amdgpu_fence_process(ring);
                }
                break;
        }
        return 0;
}

static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal register access in command stream\n");
        schedule_work(&adev->reset_work);
        return 0;
}

static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal instruction in command stream\n");
        /* XXX soft reset the gfx block only */
        schedule_work(&adev->reset_work);
        return 0;
}

static int gfx_v7_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        gfx_v7_0_enable_gui_idle_interrupt(adev, false);
        /* order matters! */
        if (gate) {
                gfx_v7_0_enable_mgcg(adev, true);
                gfx_v7_0_enable_cgcg(adev, true);
        } else {
                gfx_v7_0_enable_cgcg(adev, false);
                gfx_v7_0_enable_mgcg(adev, false);
        }
        gfx_v7_0_enable_gui_idle_interrupt(adev, true);

        return 0;
}

static int gfx_v7_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                gate = true;

        if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
                              AMDGPU_PG_SUPPORT_GFX_SMG |
                              AMDGPU_PG_SUPPORT_GFX_DMG |
                              AMDGPU_PG_SUPPORT_CP |
                              AMDGPU_PG_SUPPORT_GDS |
                              AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
                gfx_v7_0_update_gfx_pg(adev, gate);
                if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
                        gfx_v7_0_enable_cp_pg(adev, gate);
                        gfx_v7_0_enable_gds_pg(adev, gate);
                }
        }

        return 0;
}

const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .early_init = gfx_v7_0_early_init,
        .late_init = NULL,
        .sw_init = gfx_v7_0_sw_init,
        .sw_fini = gfx_v7_0_sw_fini,
        .hw_init = gfx_v7_0_hw_init,
        .hw_fini = gfx_v7_0_hw_fini,
        .suspend = gfx_v7_0_suspend,
        .resume = gfx_v7_0_resume,
        .is_idle = gfx_v7_0_is_idle,
        .wait_for_idle = gfx_v7_0_wait_for_idle,
        .soft_reset = gfx_v7_0_soft_reset,
        .print_status = gfx_v7_0_print_status,
        .set_clockgating_state = gfx_v7_0_set_clockgating_state,
        .set_powergating_state = gfx_v7_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
        .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
        .test_ring = gfx_v7_0_ring_test_ring,
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .get_rptr = gfx_v7_0_ring_get_rptr_compute,
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .parse_cs = NULL,
        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
        .test_ring = gfx_v7_0_ring_test_ring,
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
};

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
        .set = gfx_v7_0_set_eop_interrupt_state,
        .process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
        .set = gfx_v7_0_set_priv_reg_fault_state,
        .process = gfx_v7_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
        .set = gfx_v7_0_set_priv_inst_fault_state,
        .process = gfx_v7_0_priv_inst_irq,
};

static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
        adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

        adev->gfx.priv_reg_irq.num_types = 1;
        adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

        adev->gfx.priv_inst_irq.num_types = 1;
        adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}

static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
        /* init asic gds info */
        adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
        adev->gds.gws.total_size = 64;
        adev->gds.oa.total_size = 16;

        if (adev->gds.mem.total_size == 64 * 1024) {
                adev->gds.mem.gfx_partition_size = 4096;
                adev->gds.mem.cs_partition_size = 4096;

                adev->gds.gws.gfx_partition_size = 4;
                adev->gds.gws.cs_partition_size = 4;

                adev->gds.oa.gfx_partition_size = 4;
                adev->gds.oa.cs_partition_size = 1;
        } else {
                adev->gds.mem.gfx_partition_size = 1024;
                adev->gds.mem.cs_partition_size = 1024;

                adev->gds.gws.gfx_partition_size = 16;
                adev->gds.gws.cs_partition_size = 16;

                adev->gds.oa.gfx_partition_size = 4;
                adev->gds.oa.cs_partition_size = 4;
        }
}

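/*
 * Walk every SE/SH pair, record each active CU bitmap, count the active
 * CUs and mark up to two CUs per SH as always-on in ao_cu_mask.
 */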
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
                         struct amdgpu_cu_info *cu_info)
{
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;

        if (!adev || !cu_info)
                return -EINVAL;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
                        bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
                        cu_info->bitmap[i][j] = bitmap;

                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
                                if (bitmap & mask) {
                                        if (counter < 2)
                                                ao_bitmap |= mask;
                                        counter++;
                                }
                                mask <<= 1;
                        }
                        active_cu_number += counter;
                        ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                }
        }

        cu_info->number = active_cu_number;
        cu_info->ao_cu_mask = ao_cu_mask;
        mutex_unlock(&adev->grbm_idx_mutex);
        return 0;
}