/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

#define CIK_PIPE_PER_MEC	(4)

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
		unsigned int timeout, uint32_t pipe_id,
		uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int timeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

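/*
 * acquire_queue() binds register access to one HQD slot via SRBM. Note
 * the pre-increment below: KFD pipe 0 maps to hardware pipe 1 of MEC1,
 * presumably because hardware pipe 0 is reserved for the graphics
 * driver's own compute queues.
 */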
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

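/*
 * kgd_init_pipeline() programs the pipe's EOP (HPD) buffer; the base
 * address registers take the address in 256-byte units, hence the >> 8.
 */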
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, 0, 0);
	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
	WREG32(mmCP_HPD_EOP_VMID, 0);
	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
	unlock_srbm(kgd);

	return 0;
}

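/* Enable time-stamp and opcode-error interrupts on this compute pipe. */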
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
	pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

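/*
 * kgd_hqd_load() copies an MQD into the HQD registers of the selected
 * pipe/queue slot and activates it. The write-pointer shadow is applied
 * only if it can be read from user space.
 */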
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t wptr_shadow, is_wptr_shadow_valid;
	struct cik_mqd *m;

	m = get_mqd(mqd);

	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);

	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);

	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);

	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);

	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);

	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);

	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
			m->cp_hqd_pq_rptr_report_addr_hi);

	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);

	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);

	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);

	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);

	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);

	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);

	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);

	if (is_wptr_shadow_valid)
		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);

	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
	release_queue(kgd);

	return 0;
}

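/*
 * Load an SDMA RLC queue: disable the ring buffer, wait (up to 2 s) for
 * the context to go idle, clear RESUME_CTX on the owning engine, then
 * program the doorbell and ring-buffer registers and re-enable the ring.
 */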
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
			m->sdma_rlc_doorbell);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}

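/*
 * An HQD slot is considered occupied by the given queue if it is active
 * and its ring-buffer base (in 256-byte units) matches queue_address.
 */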
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

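/*
 * Ask the CP to dequeue the queue, then poll CP_HQD_ACTIVE until the HQD
 * goes inactive, sleeping 20 ms per iteration until the caller's timeout
 * (in ms) is exhausted.
 */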
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);

	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (timeout <= 0) {
			pr_err("kfd: cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		msleep(20);
		timeout -= 20;
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	return 0;
}

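/*
 * Disable all watch points by writing CNTL words with the valid bit
 * cleared; the mask is restored to its default and the ATC bit is set.
 */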
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

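/*
 * Select waves via GRBM_GFX_INDEX, issue the SQ_CMD, then restore
 * GRBM_GFX_INDEX to broadcast mode so that later register writes reach
 * all shader engines again.
 */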
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */
	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

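/*
 * Report the ucode version of the firmware that amdgpu loaded for the
 * given engine, taken from the common firmware header.
 */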
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}