/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
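
/* golden register settings: each entry below is a {register, AND mask,
 * OR value} triplet consumed by amdgpu_program_register_sequence()
 */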
static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default: BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);

	err = reject_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
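	/* the io_debug section is a list of (MC_SEQ_IO_DEBUG_INDEX,
	 * MC_SEQ_IO_DEBUG_DATA) register pairs, so each entry is two
	 * dwords wide, hence the divide by (4 * 2) below
	 */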
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
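
	/* only load the ucode when the MC sequencer is not already
	 * running (e.g. already started by the vbios)
	 */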
204 /* reset the engine and set to writable */
205 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
206 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
208 /* load mc io regs */
209 for (i = 0; i < regs_size; i++) {
210 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
211 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
213 /* load the MC ucode */
214 for (i = 0; i < ucode_size; i++)
215 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
217 /* put the engine back into the active state */
218 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
219 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
220 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
222 /* wait for training to complete */
223 for (i = 0; i < adev->usec_timeout; i++) {
224 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
225 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
229 for (i = 0; i < adev->usec_timeout; i++) {
230 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
231 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (gmc_v7_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v7_0_mc_resume(adev, &save);
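
	/* make sure CPU access to the frame buffer is enabled */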
	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
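	/* NOOFCHAN is an encoded channel count, not a raw number */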
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on CIK */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

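	/* each GART entry is a 64-bit PTE: the bits above the 4K page
	 * offset hold the page address, the low 12 bits the access flags
	 */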
	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
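	/* the hw block size field is expressed relative to 512 (1 << 9)
	 * pages, hence the -9 below
	 */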
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * debugger dump, etc.) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
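
		/* MC_VM_FB_OFFSET is in units of 4 MiB (1 << 22 bytes) */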
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
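	/* mc_client packs a four-character ASCII client tag, one byte per
	 * character, most significant byte first
	 */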
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for CIK is 40 bits.
	 */
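	/* amdgpu_vm_size is in GiB; one GiB is 1 << 18 pages of 4 KiB,
	 * hence the shift below
	 */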
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle((void *)adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
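	/* WREG32_P(reg, v, mask) read-modify-writes: new = (old & mask) | v;
	 * writing 1 with mask ~1 sets only bit 0, which clears the latched
	 * fault address and status above
	 */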
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}