/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
		err = reject_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
	}
}
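
/*
 * sdma_v3_0_ring_insert_nop - insert NOP packets into the ring
 *
 * Pad the ring with @count NOP dwords.  When the firmware supports
 * burst NOPs (feature version >= 20, see sdma_v3_0_init_microcode),
 * a single NOP header with a COUNT field covers the whole run.
 */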
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vm_id, bool ctx_switch)
{
	u32 vmid = vm_id & 0xf;

	/* IB packet must end on an 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
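
/*
 * sdma_v3_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the DMA ring
 *
 * Invalidate the HDP cache with an SRBM write of 1 to mmHDP_DEBUG0 (VI).
 */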
static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @fence: amdgpu fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
			"clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable) {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
			if (amdgpu_sdma_phase_quantum) {
				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
				       phase_quantum);
				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
				       phase_quantum);
			}
		} else {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
		}

		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		amdgpu_ring_clear_ring(ring);
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		sdma_v3_0_ring_set_wptr(ring);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

	if (!adev->pp_enabled) {
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
									(i == 0) ?
									AMDGPU_UCODE_ID_SDMA0 :
									AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* disable sdma engine before programming it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm: amdgpu_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
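
/*
 * Pick the number of SDMA instances for the ASIC (Stoney has a single
 * engine, everything else uses SDMA_MAX_INSTANCE) and hook up the
 * ring/buffer/vm-pte/irq function tables.
 */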
static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}
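
/*
 * Register the trap and illegal-instruction interrupt sources, load the
 * SDMA microcode, and initialize one doorbell-driven ring per instance.
 */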
static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}
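
/* Idle when neither SDMA engine reports busy in SRBM_STATUS2. */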
static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		adev->sdma.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->sdma.srbm_soft_reset = 0;
		return false;
	}
}
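
/* Quiesce the engines before the SRBM soft reset is applied. */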
static int sdma_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_ctx_switch_enable(adev, false);
		sdma_v3_0_enable(adev, false);
	}

	return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_gfx_resume(adev);
		sdma_v3_0_rlc_resume(adev);
	}

	return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
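
/*
 * Enable or disable the trap interrupt for the selected SDMA instance
 * by toggling TRAP_ENABLE in that instance's SDMA0_CNTL register.
 */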
static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
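
/*
 * Decode the instance and queue from the IV ring_id and signal fence
 * completion on the matching gfx ring; the compute queues are not
 * handled yet.
 */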
static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
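
/*
 * Medium grain clock gating: clearing the SOFT_OVERRIDE bits in
 * SDMA0_CLK_CTRL allows the clocks to be gated; setting them forces
 * the clocks on.
 */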
static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	}
}
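
/*
 * Medium grain light sleep: setting MEM_POWER_OVERRIDE in
 * SDMA0_POWER_CNTL enables memory light sleep; clearing it disables it.
 */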
static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.check_soft_reset = sdma_v3_0_check_soft_reset,
	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
	.post_soft_reset = sdma_v3_0_post_soft_reset,
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v3_0_ring_emit_hdp_flush */
		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
		12 + /* sdma_v3_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}
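
/*
 * A single copy/fill packet moves at most 0x1fffff (~2 MiB - 1) bytes;
 * larger requests are expected to be split by the caller.
 */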
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};