/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
#include "radeon_ucode.h"
#include <linux/firmware.h>
#include "cik_structs.h"
#define CIK_PIPE_PER_MEC	(4)

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	TCP_WATCH0_ADDR_H, TCP_WATCH0_ADDR_L, TCP_WATCH0_CNTL,
	TCP_WATCH1_ADDR_H, TCP_WATCH1_ADDR_L, TCP_WATCH1_CNTL,
	TCP_WATCH2_ADDR_H, TCP_WATCH2_ADDR_L, TCP_WATCH2_CNTL,
	TCP_WATCH3_ADDR_H, TCP_WATCH3_ADDR_L, TCP_WATCH3_CNTL
};

struct kgd_mem {
	struct radeon_bo *bo;
	uint64_t gpu_addr;
	void *cpu_ptr;
};
static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr);

static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);

static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);

static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
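
/*
 * Callback table handed to amdkfd at probe time (see
 * radeon_kfd_device_probe); amdkfd uses these hooks for GTT memory
 * allocation, queue management and register access on the radeon device.
 */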
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version
};

static const struct kgd2kfd_calls *kgd2kfd;
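
/*
 * When amdkfd is built as a module, resolve kgd2kfd_init at runtime with
 * symbol_request() so radeon can still load without it; when amdkfd is
 * built in, call it directly.
 */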
int radeon_kfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}
#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
#else
	ret = -ENOENT;
#endif

	return ret;
}

void radeon_kfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}
void radeon_kfd_device_probe(struct radeon_device *rdev)
{
	if (kgd2kfd)
		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
			rdev->pdev, &kfd2kgd);
}
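
/*
 * Describe the resources amdkfd may use: the upper compute VMIDs (8-15),
 * the doorbell aperture, and the bitmap of MEC0 compute queues on
 * pipes 1-3 (pipe 0 stays with radeon).
 */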
void radeon_kfd_device_init(struct radeon_device *rdev)
{
	int i, queue, pipe, mec;

	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,
			.num_pipe_per_mec = 4,
			.num_queue_per_pipe = 8
		};

		bitmap_zero(gpu_resources.queue_bitmap, KGD_MAX_QUEUES);

		for (i = 0; i < KGD_MAX_QUEUES; ++i) {
			queue = i % gpu_resources.num_queue_per_pipe;
			pipe = (i / gpu_resources.num_queue_per_pipe)
				% gpu_resources.num_pipe_per_mec;
			mec = (i / gpu_resources.num_queue_per_pipe)
				/ gpu_resources.num_pipe_per_mec;

			if (mec == 0 && pipe > 0)
				set_bit(i, gpu_resources.queue_bitmap);
		}

		radeon_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}
void radeon_kfd_device_fini(struct radeon_device *rdev)
{
	if (rdev->kfd) {
		kgd2kfd->device_exit(rdev->kfd);
		rdev->kfd = NULL;
	}
}

void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
{
	if (rdev->kfd)
		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

void radeon_kfd_suspend(struct radeon_device *rdev)
{
	if (rdev->kfd)
		kgd2kfd->suspend(rdev->kfd);
}

int radeon_kfd_resume(struct radeon_device *rdev)
{
	int r = 0;

	if (rdev->kfd)
		r = kgd2kfd->resume(rdev->kfd);

	return r;
}
static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
				RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = radeon_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	radeon_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	radeon_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	radeon_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	radeon_bo_unref(&(*mem)->bo);

	return r;
}
static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	radeon_bo_reserve(mem->bo, true);
	radeon_bo_kunmap(mem->bo);
	radeon_bo_unpin(mem->bo);
	radeon_bo_unreserve(mem->bo);
	radeon_bo_unref(&(mem->bo));
	kfree(mem);
}
static uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	BUG_ON(kgd == NULL);

	return rdev->mc.real_vram_size;
}

static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	return rdev->asic->get_gpu_clock_counter(rdev);
}

static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = (struct radeon_device *)kgd;

	/* The sclk is in quanta of 10 kHz */
	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}
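
/*
 * Register access helpers: a kgd_dev handle is really a radeon_device, and
 * write_register()/read_register() go through the MMIO aperture at
 * rdev->rmmio using a byte offset.
 */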
static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
{
	return (struct radeon_device *)kgd;
}

static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	writel(value, (void __iomem *)(rdev->rmmio + offset));
}

static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	return readl((void __iomem *)(rdev->rmmio + offset));
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct radeon_device *rdev = get_radeon_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&rdev->srbm_mutex);
	write_register(kgd, SRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct radeon_device *rdev = get_radeon_device(kgd);

	write_register(kgd, SRBM_GFX_CNTL, 0);
	mutex_unlock(&rdev->srbm_mutex);
}
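
/*
 * HQD and SDMA RLC registers are banked per queue; lock_srbm() selects the
 * target MEC/pipe/queue in SRBM_GFX_CNTL before they are touched.  The mec
 * value here is 1-based, e.g. pipe_id 5 maps to mec 2, pipe 1.
 */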
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(kgd, 0, 0, 0, vmid);

	write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
	write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
	write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	write_register(kgd, SH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
	 * because a mapping is in progress or because a mapping finished and
	 * the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID_PASID_MAPPING_VALID_MASK;

	write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
			pasid_mapping);

	while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
			(1U << vmid)))
		cpu_relax();
	write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
			pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* nothing to do here */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
	pipe = (pipe_id % CIK_PIPE_PER_MEC);

	lock_srbm(kgd, mec, pipe, 0, 0);

	write_register(kgd, CPC_INT_CNTL,
			TIME_STAMP_INT_ENABLE | OPCODE_ERROR_INT_ENABLE);

	unlock_srbm(kgd);

	return 0;
}
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
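
/*
 * Program a compute HQD from the MQD snapshot prepared by amdkfd, then set
 * CP_HQD_ACTIVE to hand the queue over to the CP.  The write pointer is
 * taken from the user-mode shadow (wptr) when that read succeeds.
 */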
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	uint32_t wptr_shadow, is_wptr_shadow_valid;
	struct cik_mqd *m;

	m = get_mqd(mqd);

	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);

	acquire_queue(kgd, pipe_id, queue_id);
	write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
	write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
	write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);

	write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
	write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
	write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);

	write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
	write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
	write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);

	write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);

	write_register(kgd, CP_HQD_PERSISTENT_STATE,
			m->cp_hqd_persistent_state);
	write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
	write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);

	write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
			m->cp_hqd_atomic0_preop_lo);
	write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
			m->cp_hqd_atomic0_preop_hi);
	write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
			m->cp_hqd_atomic1_preop_lo);
	write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
			m->cp_hqd_atomic1_preop_hi);

	write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
			m->cp_hqd_pq_rptr_report_addr_lo);
	write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
			m->cp_hqd_pq_rptr_report_addr_hi);

	write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);

	write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
			m->cp_hqd_pq_wptr_poll_addr_lo);
	write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
			m->cp_hqd_pq_wptr_poll_addr_hi);

	write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
			m->cp_hqd_pq_doorbell_control);

	write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);

	write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);

	write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
	write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);

	write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);

	if (is_wptr_shadow_valid)
		write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);

	write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
	release_queue(kgd);

	return 0;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_BASE,
			m->sdma_rlc_rb_base);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_DOORBELL,
			m->sdma_rlc_doorbell);
	write_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}
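
/*
 * A queue slot counts as occupied when the HQD is active and its ring base
 * matches the queue address being asked about (the PQ_BASE registers hold
 * the address in 256-byte units, hence the >> 8).
 */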
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = read_register(kgd, CP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
				high == read_register(kgd, CP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);

	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = read_register(kgd,
			sdma_base_addr + SDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
		return true;

	return false;
}
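
/*
 * Ask the CP to preempt or reset the queue via CP_HQD_DEQUEUE_REQUEST and
 * poll CP_HQD_ACTIVE until the queue goes idle, giving up once the
 * caller-supplied timeout (in ms) is exhausted.
 */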
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t temp;

	acquire_queue(kgd, pipe_id, queue_id);
	write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);

	write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);

	while (true) {
		temp = read_register(kgd, CP_HQD_ACTIVE);
		if (!(temp & 0x1))
			break;
		if (timeout == 0) {
			pr_err("kfd: cp queue preemption time out (%dms)\n",
				temp);
			release_queue(kgd);
			return -ETIME;
		}
		msleep(20);
		timeout -= 20;
	}

	release_queue(kgd);

	return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA_RB_ENABLE;
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = read_register(kgd, sdma_base_addr +
						SDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA_RLC_IDLE)
			break;
		if (timeout == 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
	write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);

	return 0;
}
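
/*
 * TCP address watch points: each watch point is an (ADDR_H, ADDR_L, CNTL)
 * register triplet listed in watchRegs[]; the CNTL valid bit arms it.
 */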
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		write_register(kgd,
				watchRegs[i * ADDRESS_WATCH_REG_MAX +
					ADDRESS_WATCH_REG_CNTL],
				cntl.u32All);

	return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	write_register(kgd,
			watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
				ADDRESS_WATCH_REG_CNTL],
			cntl.u32All);

	write_register(kgd,
			watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
				ADDRESS_WATCH_REG_ADDR_HI],
			addr_hi);

	write_register(kgd,
			watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
				ADDRESS_WATCH_REG_ADDR_LO],
			addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	write_register(kgd,
			watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
				ADDRESS_WATCH_REG_CNTL],
			cntl.u32All);

	return 0;
}
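
/*
 * Broadcast an SQ_CMD (e.g. halt/resume/kill waves) to the shader engines
 * selected by gfx_index_val, then restore GRBM_GFX_INDEX to broadcast mode.
 */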
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct radeon_device *rdev = get_radeon_device(kgd);
	uint32_t data;

	mutex_lock(&rdev->grbm_idx_mutex);

	write_register(kgd, GRBM_GFX_INDEX, gfx_index_val);
	write_register(kgd, SQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */
	data = INSTANCE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
		SE_BROADCAST_WRITES;

	write_register(kgd, GRBM_GFX_INDEX, data);

	mutex_unlock(&rdev->grbm_idx_mutex);

	return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
		/ 4;
}
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
{
	uint32_t reg;
	struct radeon_device *rdev = (struct radeon_device *) kgd;

	reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
	return reg & ATC_VMID_PASID_MAPPING_VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct radeon_device *rdev = (struct radeon_device *) kgd;

	reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
	return reg & ATC_VMID_PASID_MAPPING_PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct radeon_device *rdev = (struct radeon_device *) kgd;

	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
}
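
/*
 * Report the microcode version for the requested engine by parsing the
 * common header of the firmware image already loaded by radeon.
 */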
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct radeon_device *rdev = (struct radeon_device *) kgd;
	const union radeon_firmware_header *hdr;

	BUG_ON(kgd == NULL || rdev->mec_fw == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union radeon_firmware_header *)
							rdev->mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
	case KGD_ENGINE_SDMA2:
		hdr = (const union radeon_firmware_header *)
							rdev->sdma_fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use */
	return hdr->common.ucode_version;
}