/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"
34 #include "gmc/gmc_8_1_d.h"
35 #include "gmc/gmc_8_1_sh_mask.h"
37 #include "oss/oss_3_0_d.h"
38 #include "oss/oss_3_0_sh_mask.h"
40 #include "bif/bif_5_0_d.h"
41 #include "bif/bif_5_0_sh_mask.h"
43 #include "gca/gfx_8_0_d.h"
44 #include "gca/gfx_8_0_sh_mask.h"
46 #include "smu/smu_7_1_1_d.h"
47 #include "smu/smu_7_1_1_sh_mask.h"
49 #include "uvd/uvd_5_0_d.h"
50 #include "uvd/uvd_5_0_sh_mask.h"
52 #include "vce/vce_3_0_d.h"
53 #include "vce/vce_3_0_sh_mask.h"
55 #include "dce/dce_10_0_d.h"
56 #include "dce/dce_10_0_sh_mask.h"
64 #include "sdma_v2_4.h"
65 #include "sdma_v3_0.h"
66 #include "dce_v10_0.h"
67 #include "dce_v11_0.h"
68 #include "iceland_ih.h"
74 #if defined(CONFIG_DRM_AMD_ACP)
75 #include "amdgpu_acp.h"
77 #include "dce_virtual.h"
79 #include "amdgpu_dm.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181
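
/*
 * On Carrizo/Stoney APUs the SMC is reached through the MP0PUB index/data
 * pair defined above; vi_common_early_init() installs the cz_* accessors
 * below in place of the vi_smc_* ones when AMD_IS_APU is set.
 */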

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
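
/*
 * Per-ASIC golden register init sequences, stored as
 * { register, AND mask, value } triples and applied with
 * amdgpu_device_program_register_sequence().
 */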

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
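
/* Apply the ASIC-specific golden sequences above; called from common hw_init. */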

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU) {
		switch (adev->asic_type) {
		case CHIP_STONEY:
			/* vbios says 48Mhz, but the actual freq is 100Mhz */
			return 10000;
		default:
			return reference_clock;
		}
	}

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
			D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
			D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
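
/*
 * Read the vbios image directly from ROM through the SMC ROM_INDEX/ROM_DATA
 * indirect registers; only meaningful on dGPUs, since the APU vbios lives
 * inside the system BIOS image.
 */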

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
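
/*
 * Detect SR-IOV (VF vs. PF) or VM passthrough before any IP blocks are
 * added, using BIF_IOV_FUNC_IDENTIFIER on Tonga/Fiji.
 */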

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
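
/*
 * Resolve a register read for the MMR read query path: GRBM-indexed
 * registers honour se_num/sh_num via GRBM_GFX_INDEX, and a few config
 * registers are answered from the cached gfx config instead of MMIO.
 */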

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	pci_clear_master(adev->pdev);
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
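
/*
 * Program a UVD clock (VCLK/DCLK) by asking the vbios for dividers, writing
 * the post divider into the given CNTL register and polling the matching
 * STATUS register until the clock reports stable.
 */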

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
			 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
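
/* GNB indirect clock DFS/status registers used for UVD/VCE clocks on APUs. */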

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
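
/* ATI rev id fuse location on APUs, read through the SMC address space. */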

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
};
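
/* Bristol Ridge is a Carrizo respin, identified by its PCI revision range. */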

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= 0xfffffffe;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
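
/*
 * On Tonga/Polaris the SMU owns system block clockgating: translate the
 * requested state into PP_CG_MSG_ID() messages per block (MC, SDMA, HDP,
 * BIF, DRM, ROM) and forward them through the powerplay interface.
 */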

static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}