/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
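
/* Aligned size, in bytes, reserved for each queue's MQD (memory queue descriptor) */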
#define MQD_SIZE_ALIGNED 768
/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution.
 * Create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};
struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
	{ 0x15DD, &raven_device_info },		/* Raven */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf */
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf */
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x6FDF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
	{ 0x6860, &vega10_device_info },	/* Vega10 */
	{ 0x6861, &vega10_device_info },	/* Vega10 */
	{ 0x6862, &vega10_device_info },	/* Vega10 */
	{ 0x6863, &vega10_device_info },	/* Vega10 */
	{ 0x6864, &vega10_device_info },	/* Vega10 */
	{ 0x6867, &vega10_device_info },	/* Vega10 */
	{ 0x6868, &vega10_device_info },	/* Vega10 */
	{ 0x6869, &vega10_device_info },	/* Vega10 */
	{ 0x686A, &vega10_device_info },	/* Vega10 */
	{ 0x686B, &vega10_device_info },	/* Vega10 */
	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf */
	{ 0x686D, &vega10_device_info },	/* Vega10 */
	{ 0x686E, &vega10_device_info },	/* Vega10 */
	{ 0x686F, &vega10_device_info },	/* Vega10 */
	{ 0x687F, &vega10_device_info },	/* Vega10 */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
			   unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);
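
/* Map a PCI device ID to its kfd_device_info entry; returns NULL (after a
 * warning) if the DID is not in supported_devices.
 */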
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}
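
/* First-stage probe, called by KGD for each supported GPU: allocate a
 * kfd_dev and check PCIe atomics support. Full initialization is deferred
 * to kgd2kfd_device_init(). Returns NULL if the device cannot be handled.
 */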
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;
	int ret;
	const struct kfd_device_info *device_info =
					lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (device_info->needs_pci_atomics && ret < 0) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}
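
/* Select the CWSR (compute wave save/restore) trap handler ISA for this
 * ASIC generation and enable CWSR if both the module parameter and the
 * device support it.
 */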
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
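
/* Second-stage initialization: partition the compute VMIDs shared with KGD,
 * size and allocate the GTT region used for MQDs, runlists and kernel
 * queues, then bring up doorbells, topology, interrupts, the device queue
 * manager and IOMMUv2 (where needed) before resuming the device.
 */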
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}
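
/* Tear down everything kgd2kfd_device_init() set up, then free the device. */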
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}
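
/* Called by KGD before a GPU reset: suspend processes, signal reset events
 * to user mode and hold dqm->lock until kgd2kfd_post_reset() releases it.
 */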
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;
	kgd2kfd_suspend(kfd);

	/* Hold dqm->lock to prevent further execution. */
	dqm_lock(kfd->dqm);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to terminate.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	dqm_unlock(kfd->dqm);

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count != 0, "KFD reset ref. error");
	return 0;
}
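
/* Return true while the driver is locked for suspend or reset; new process
 * creation (open) fails with -EAGAIN in this state.
 */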
bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}
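
/* Suspend one KFD device; suspending the first device also suspends all
 * KFD processes.
 */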
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* When the first KFD device is suspended, suspend all KFD processes */
	if (atomic_inc_return(&kfd_locked) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}
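
/* Resume one KFD device; when the last suspended device resumes, all KFD
 * processes are resumed as well.
 */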
int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}
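
/* Common resume path for kgd2kfd_resume() and kgd2kfd_post_reset(): resume
 * the IOMMU (if used) and restart the device queue manager.
 */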
static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
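
/* Evict the queues of the KFD process that owns @mm, e.g. in preparation
 * for evicting its buffer objects.
 */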
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}
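
/* Counterpart of kgd2kfd_quiesce_mm(): restore the queues of the KFD
 * process that owns @mm.
 */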
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}
/** kgd2kfd_schedule_evict_and_restore_process - Schedules a work item that
 *   will prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again.
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker.
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}
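
/* GTT sub-allocator: carves the GTT buffer reserved in
 * kgd2kfd_device_init() into fixed-size chunks, tracked by one bit per
 * chunk. E.g. a 4 MiB buffer with 512-byte chunks yields 8192 chunks,
 * tracked by 128 longs of bitmap on a 64-bit machine.
 */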
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
			   unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						 unsigned int bit_num,
						 unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
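
/* Allocate @size bytes as a run of contiguous free chunks, found by a
 * first-fit search over the chunk bitmap.
 */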
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, restart the
		 * search from the last free chunk we found (which wasn't
		 * contiguous with the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached the end of the buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}
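
/* Return a sub-allocation to the pool by clearing its chunk bits. Freeing
 * a NULL object is a no-op, like kfree().
 */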
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}
#if defined(CONFIG_DEBUG_FS)

/* This function sends a packet to the HIQ to hang the HWS, which triggers
 * a GPU reset and brings the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled\n");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif