/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "si/sid.h"
static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

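/*
 * gmc_v6_0_mc_stop - quiesce the memory controller
 *
 * Stop display MC access and black out the MC so the framebuffer
 * aperture can be reprogrammed safely.
 */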
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
	WREG32(BIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

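/*
 * gmc_v6_0_init_microcode - fetch the MC firmware image
 *
 * Select the firmware name by ASIC, then load and validate it; on
 * failure the firmware reference is released and an error returned.
 */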
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	err = reject_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

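/*
 * gmc_v6_0_mc_load_microcode - program the MC firmware
 *
 * If the MC sequencer is not already running, load the IO debug
 * index/data register pairs and the ucode itself, then restart the
 * engine and wait for both memory-training done flags.
 */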
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}

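/*
 * gmc_v6_0_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * VRAM is clamped so that enough of the 40-bit MC address space remains
 * for the GTT aperture.
 */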
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

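/*
 * gmc_v6_0_mc_program - program the GPU memory controller
 *
 * Initialize the HDP registers, disable VGA render, then set the
 * system aperture and FB location while MC access is blacked out.
 */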
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v6_0_mc_stop(adev, &save);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	gmc_v6_0_mc_resume(adev, &save);
}

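/*
 * gmc_v6_0_mc_init - query VRAM width and sizes
 *
 * Derive the memory bus width from the channel size and channel count,
 * read the VRAM size from CONFIG_MEMSIZE and pick the GTT size.
 */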
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the gart
	 * size to 1024M or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v6_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

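/*
 * GART helpers: flush_gpu_tlb invalidates the TLB entry for one VMID
 * after page-table updates; set_pte_pde writes a single 64-bit PTE/PDE
 * into the CPU-visible page table.
 */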
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
}

static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

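/*
 * gmc_v6_0_set_fault_enable_default - toggle default VM fault handling
 *
 * Set or clear every *_PROTECTION_FAULT_ENABLE_DEFAULT field in
 * VM_CONTEXT1_CNTL according to @value.
 */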
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(VM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(VM_CONTEXT1_CNTL, tmp);
}

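/*
 * gmc_v6_0_gart_enable - set up and enable the PCIE GART
 *
 * Pin the GART table in VRAM, configure the L1 TLB and L2 cache,
 * point context0 at the GTT range and contexts 1-15 at the per-VM
 * page tables, then enable fault reporting and flush the TLB.
 */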
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(4) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
	       PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
	       VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
	       READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       READ_PROTECTION_FAULT_ENABLE_DEFAULT |
	       WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

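/*
 * gmc_v6_0_gart_disable - tear down the PCIE GART
 *
 * Save the per-context page-table base registers, disable all VM
 * contexts and the L1/L2 translation, then unpin the GART table.
 */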
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;

		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	amdgpu_gart_table_vram_unpin(adev);
}

static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(MC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
{
}

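/*
 * gmc_v6_0_vm_decode_fault - print a decoded VM protection fault
 *
 * Unpack the VMID, protection bits, client id and read/write direction
 * from the fault status and log them with the four-CC client name.
 */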
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
					xxPROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      xxMEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      xxMEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(HDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(HDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(HDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(HDP_MEM_POWER_LS, data);
}

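/*
 * gmc_v6_0_convert_vram_type - map the MC_SEQ_MISC0 memory-type field
 * to the corresponding AMDGPU_VRAM_TYPE_* value.
 */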
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gart_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(MC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	adev->mc.mc_mask = 0xffffffffffULL;

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					     SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					     SRBM_STATUS__MCC_BUSY_MASK |
					     SRBM_STATUS__MCD_BUSY_MASK |
					     SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

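/*
 * gmc_v6_0_soft_reset - reset the memory controller blocks
 *
 * Build an SRBM_SOFT_RESET mask from the busy bits in SRBM_STATUS
 * (skipping the MC reset on APUs) and pulse it with the MC stopped.
 */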
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev, &save);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

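/*
 * VM fault interrupt handling: the .set callback masks or unmasks the
 * per-context protection fault interrupts; the .process callback logs
 * and decodes a fault and can disable further default fault handling.
 */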
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(VM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(VM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(VM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(VM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(VM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(VM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(VM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(VM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v6_0_vm_decode_fault(adev, status, addr, 0);

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}