/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8

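/*
 * The raw HDP table below consists of {register offset, AND mask, OR value}
 * triplets, as consumed by amdgpu_device_program_register_sequence() in
 * gmc_v9_0_gart_enable(). The soc15_reg_golden tables that follow carry the
 * same kind of data via SOC15_REG_GOLDEN_VALUE().
 */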
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

/* ECC-related register addresses (BASE + register offset). */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel ECC config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

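/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused here)
 * @state: enable or disable the fault interrupt
 *
 * Set or clear the protection fault interrupt enable bits in all sixteen
 * VM_CONTEXT*_CNTL registers on every VM hub.
 */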
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

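/**
 * gmc_v9_0_process_interrupt - handle a VM page fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Reassemble the faulting address from the IV ring payload, read (and clear)
 * the L2 protection fault status on bare metal, and log the fault together
 * with the offending process information.
 */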
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

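/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG*_REQ value
 *
 * @vmid: VM instance to invalidate
 *
 * Returns the request register value for a legacy-mode invalidation of
 * all L1/L2 PTE and PDE cache levels for the given VMID.
 */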
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	spin_lock(&adev->gmc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->gmc.invalidate_lock);
}

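/**
 * gmc_v9_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: ring used to emit the flush
 * @vmid: VM instance to flush
 * @pd_addr: new page directory base address
 *
 * Write the page table base for @vmid through the ring, then emit a
 * write-then-wait on the invalidation engine assigned to the ring.
 * Returns the PDE-encoded page directory address that was programmed.
 */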
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	unsigned eng = ring->vm_inv_eng;

	amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

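/**
 * gmc_v9_0_emit_pasid_mapping - update the VMID to PASID mapping
 *
 * @ring: ring used to emit the write
 * @vmid: VM instance to map
 * @pasid: process address space ID to associate with @vmid
 *
 * The IH keeps a per-hub VMID-to-PASID lookup table; pick the LUT matching
 * the ring's VM hub and write the new mapping through the ring.
 */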
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/**
 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

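/**
 * gmc_v9_0_get_vm_pte_flags - translate mapping flags into PTE bits
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* flags of the mapping
 *
 * Convert the generic read/write/execute, memory-type and PRT mapping
 * flags into the hardware PTE bits described above.
 */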
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

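/**
 * gmc_v9_0_get_vm_pde - adjust a PDE for the hardware
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level of the entry
 * @addr: page table address, rebased to an MC address
 * @flags: PDE flags, extended with block fragment size or
 *	   translate-further bits when 5-level translation is used
 */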
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

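/*
 * Probe the per-channel UMC capability, config and ECC control registers
 * and count everything that would prevent ECC from working ("lost sheep").
 * Returns 1 if ECC is enabled and usable on all channels, 0 otherwise.
 */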
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	/*
	 * TODO - Uncomment once the GART corruption issue is fixed.
	 */
	/* amdgpu_bo_late_init(adev); */

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

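/**
 * gmc_v9_0_vram_gtt_location - place VRAM and GART in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the placements
 */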
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports the VRAM size in MB; convert it to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

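/**
 * gmc_v9_0_gart_init - initialize the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the common GART structure and allocate the table in VRAM.
 * GART entries are mapped uncached and executable.
 */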
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

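/*
 * Return the size of the VBIOS/pre-OS framebuffer that must stay reserved.
 * Currently hardwired to 9 MB until the GART corruption issue mentioned
 * below is resolved; the disabled code computes the real scanout size.
 */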
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
#if 0
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
#endif
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	size = 9 * 1024 * 1024;

#if 0
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

#endif
	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* Set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits.
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides the GART, which by default gets placed in the
	 * first 8M, and causes VM_FAULTs once the GTT is accessed.
	 * Keep the stolen memory reservation until this is resolved.
	 * Also check the related code in gmc_v9_0_get_vbios_fb_size
	 * and gmc_v9_0_late_init.
	 */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

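/* Apply the per-ASIC MMHUB/ATHUB golden register settings. */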
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP. */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume.
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_fini(adev);
	if (r)
		return r;

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_restore_registers(adev);
	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};