// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/vmalloc.h>

#include <drm/drm_accel.h>
#include <drm/drm_drv.h>

#include <trace/events/habanalabs.h>

#define HL_RESET_DELAY_USEC			10000	/* 10ms */

#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC	30

enum dma_alloc_type {
	DMA_ALLOC_COHERENT,
	DMA_ALLOC_POOL,
};

#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
/*
 * hl_set_dram_bar - set the BAR to allow later access to an address
 *
 * @hdev: pointer to habanalabs device structure.
 * @addr: the address the caller wants to access.
 * @region: the PCI region.
 * @new_bar_region_base: the new BAR region base address.
 *
 * @return: the old BAR base address on success, U64_MAX for failure.
 *	    The caller should set it back to the old address after use.
 *
 * In case the BAR space does not cover the whole address space,
 * the BAR base address should be set to allow access to a given address.
 * This function can also be called if the BAR doesn't need to be set,
 * in which case it just won't change the base.
 */
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
				u64 *new_bar_region_base)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_base_addr, old_base;

	if (is_power_of_2(prop->dram_pci_bar_size))
		bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
	else
		bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
				prop->dram_pci_bar_size;

	old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);

	/* in case of success we need to update the new BAR base */
	if ((old_base != U64_MAX) && new_bar_region_base)
		*new_bar_region_base = bar_base_addr;

	return old_base;
}
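/*
 * Illustrative arithmetic (a hedged example, not part of the driver): with a
 * power-of-2 DRAM BAR of 256MB, an access to DRAM address 0x12345678 moves
 * the BAR base to 0x12345678 & ~(SZ_256M - 1) = 0x10000000, and the caller
 * then accesses the BAR at offset addr - bar_base = 0x02345678.
 */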
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
{
	struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
	u64 old_base = 0, rc, bar_region_base = region->region_base;
	void __iomem *acc_addr;

	if (set_dram_bar) {
		old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
		if (old_base == U64_MAX)
			return -EIO;
	}

	acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
			(addr - bar_region_base);

	switch (acc_type) {
	case DEBUGFS_READ8:
		*val = readb(acc_addr);
		break;
	case DEBUGFS_WRITE8:
		writeb(*val, acc_addr);
		break;
	case DEBUGFS_READ32:
		*val = readl(acc_addr);
		break;
	case DEBUGFS_WRITE32:
		writel(*val, acc_addr);
		break;
	case DEBUGFS_READ64:
		*val = readq(acc_addr);
		break;
	case DEBUGFS_WRITE64:
		writeq(*val, acc_addr);
		break;
	}

	if (set_dram_bar) {
		rc = hl_set_dram_bar(hdev, old_base, region, NULL);
		if (rc == U64_MAX)
			return -EIO;
	}

	return 0;
}
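/*
 * Usage sketch (hypothetical values, not part of the driver): a debugfs read
 * of a 32-bit DRAM word would look like
 *
 *	u64 val;
 *	rc = hl_access_sram_dram_region(hdev, dram_addr, &val, DEBUGFS_READ32,
 *					PCI_REGION_DRAM, true);
 *
 * where the final 'true' asks for the DRAM BAR window to be moved (and
 * restored) around the access.
 */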
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, enum dma_alloc_type alloc_type,
					const char *caller)
{
	void *ptr = NULL;

	switch (alloc_type) {
	case DMA_ALLOC_COHERENT:
		ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
		break;
	case DMA_ALLOC_POOL:
		ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
		break;
	}

	if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
		trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
						caller);

	return ptr;
}
static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
					const char *caller)
{
	/* store the address before freeing, to avoid referencing a freed pointer in the trace */
	u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;

	switch (alloc_type) {
	case DMA_ALLOC_COHERENT:
		hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
		break;
	case DMA_ALLOC_POOL:
		hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
		break;
	}

	trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
}
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
					gfp_t flag, const char *caller)
{
	return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
}

void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
					dma_addr_t dma_handle, const char *caller)
{
	hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}

void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
					dma_addr_t *dma_handle, const char *caller)
{
	return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
}

void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
					const char *caller)
{
	hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}

void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
{
	return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}

void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
{
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir, const char *caller)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int rc, i;

	rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
	if (rc)
		return rc;

	if (!trace_habanalabs_dma_map_page_enabled())
		return 0;

	for_each_sgtable_dma_sg(sgt, sg, i)
		trace_habanalabs_dma_map_page(hdev->dev,
				page_to_phys(sg_page(sg)),
				sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
				sg->dma_length,
#else
				sg->length,
#endif
				dir, caller);

	return 0;
}
int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int rc, i;

	rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
	if (rc)
		return rc;

	/* Shift to the device's base physical address of host memory if necessary */
	if (prop->device_dma_offset_for_host_access)
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address += prop->device_dma_offset_for_host_access;

	return 0;
}
void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
					enum dma_data_direction dir, const char *caller)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int i;

	hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);

	if (trace_habanalabs_dma_unmap_page_enabled()) {
		for_each_sgtable_dma_sg(sgt, sg, i)
			trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
					sg->dma_address - prop->device_dma_offset_for_host_access,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
					sg->dma_length,
#else
					sg->length,
#endif
					dir, caller);
	}
}
void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct scatterlist *sg;
	int i;

	/* Cancel the device's base physical address of host memory if necessary */
	if (prop->device_dma_offset_for_host_access)
		for_each_sgtable_dma_sg(sgt, sg, i)
			sg->dma_address -= prop->device_dma_offset_for_host_access;

	dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
}
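/*
 * Note: hl_asic_dma_map_sgtable() and hl_asic_dma_unmap_sgtable() must stay
 * symmetric - the device DMA offset added after dma_map_sgtable() is
 * subtracted here before calling dma_unmap_sgtable(), so the DMA API sees
 * the same addresses it originally handed out.
 */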
/**
 * hl_access_cfg_region - access the config region
 *
 * @hdev: pointer to habanalabs device structure
 * @addr: the address to access
 * @val: the value to be written, or the buffer to read into
 * @acc_type: the type of access (read/write 64/32)
 */
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type)
{
	struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
	u32 val_h, val_l;

	if (!IS_ALIGNED(addr, sizeof(u32))) {
		dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
		return -EINVAL;
	}

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = RREG32(addr - cfg_region->region_base);
		break;
	case DEBUGFS_WRITE32:
		WREG32(addr - cfg_region->region_base, *val);
		break;
	case DEBUGFS_READ64:
		val_l = RREG32(addr - cfg_region->region_base);
		val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);

		*val = (((u64) val_h) << 32) | val_l;
		break;
	case DEBUGFS_WRITE64:
		WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
		WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
		break;
	default:
		dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
		return -EOPNOTSUPP;
	}

	return 0;
}
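/*
 * Note: the config region is accessed through 32-bit register reads/writes,
 * so the 64-bit variants above are composed of two 32-bit accesses (low word
 * at addr, high word at addr + 4) and are therefore not atomic.
 */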
/**
 * hl_access_dev_mem - access device memory
 *
 * @hdev: pointer to habanalabs device structure
 * @region_type: the type of the region the address belongs to
 * @addr: the address to access
 * @val: the value to be written, or the buffer to read into
 * @acc_type: the type of access (r/w, 32/64)
 */
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
			u64 addr, u64 *val, enum debugfs_access_type acc_type)
{
	switch (region_type) {
	case PCI_REGION_CFG:
		return hl_access_cfg_region(hdev, addr, val, acc_type);
	case PCI_REGION_SRAM:
	case PCI_REGION_DRAM:
		return hl_access_sram_dram_region(hdev, addr, val, acc_type,
				region_type, (region_type == PCI_REGION_DRAM));
	default:
		return -EFAULT;
	}
}
void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
{
	va_list args;
	int str_size;

	va_start(args, fmt);
	/* Calculate the formatted string length. vsnprintf() does not count the
	 * terminating NUL, hence increment the result by 1
	 */
	str_size = vsnprintf(NULL, 0, fmt, args) + 1;
	va_end(args);

	if ((e->actual_size + str_size) < e->allocated_buf_size) {
		va_start(args, fmt);
		vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
		va_end(args);
	}

	/* Update the size even when the destination buffer is not written, so the
	 * caller gets the exact size of all input strings
	 */
	e->actual_size += str_size;
}
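/*
 * This is the classic two-pass vsnprintf() pattern: the first pass with a
 * NULL buffer only measures the formatted length, and the second pass does
 * the actual formatting if the destination buffer has room. Since
 * actual_size is advanced unconditionally, callers can detect truncation by
 * comparing it against allocated_buf_size afterwards.
 */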
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
	enum hl_device_status status;

	if (hdev->device_fini_pending) {
		status = HL_DEVICE_STATUS_MALFUNCTION;
	} else if (hdev->reset_info.in_reset) {
		if (hdev->reset_info.in_compute_reset)
			status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
		else
			status = HL_DEVICE_STATUS_IN_RESET;
	} else if (hdev->reset_info.needs_reset) {
		status = HL_DEVICE_STATUS_NEEDS_RESET;
	} else if (hdev->disabled) {
		status = HL_DEVICE_STATUS_MALFUNCTION;
	} else if (!hdev->init_done) {
		status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
	} else {
		status = HL_DEVICE_STATUS_OPERATIONAL;
	}

	return status;
}
bool hl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
{
	enum hl_device_status current_status;

	current_status = hl_device_status(hdev);
	if (status)
		*status = current_status;

	switch (current_status) {
	case HL_DEVICE_STATUS_MALFUNCTION:
	case HL_DEVICE_STATUS_IN_RESET:
	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
	case HL_DEVICE_STATUS_NEEDS_RESET:
		return false;
	case HL_DEVICE_STATUS_OPERATIONAL:
	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
	default:
		return true;
	}
}
bool hl_ctrl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
{
	enum hl_device_status current_status;

	current_status = hl_device_status(hdev);
	if (status)
		*status = current_status;

	switch (current_status) {
	case HL_DEVICE_STATUS_MALFUNCTION:
		return false;
	case HL_DEVICE_STATUS_IN_RESET:
	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
	case HL_DEVICE_STATUS_NEEDS_RESET:
	case HL_DEVICE_STATUS_OPERATIONAL:
	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
	default:
		return true;
	}
}
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
	if (idle_mask[3])
		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
			message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
	else if (idle_mask[2])
		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
			message, idle_mask[2], idle_mask[1], idle_mask[0]);
	else if (idle_mask[1])
		dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
			message, idle_mask[1], idle_mask[0]);
	else
		dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
}
static void hpriv_release(struct kref *ref)
{
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	bool reset_device, device_is_idle = true;
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;

	hpriv = container_of(ref, struct hl_fpriv, refcount);

	hdev = hpriv->hdev;

	hdev->asic_funcs->send_device_activity(hdev, false);

	hl_debugfs_remove_file(hpriv);

	mutex_destroy(&hpriv->ctx_lock);
	mutex_destroy(&hpriv->restore_phase_mutex);

	/* There should be no memory buffers at this point, so the handles IDR can be destroyed */
	hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);

	/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
	 * reset that waits for device release.
	 */
	reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;

	/* Check the device idle status and reset if not idle.
	 * Skip it if already in reset, or if the device is going to be reset in any case.
	 */
	if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
		device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
							HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	if (!device_is_idle) {
		print_idle_status_mask(hdev, "device is not idle after user context is closed",
					idle_mask);
		reset_device = true;
	}

	/* We need to remove the user from the list to make sure the reset process won't
	 * try to kill the user process. Because, if we got here, it means there are no
	 * more driver/device resources that the user process is occupying, so there is
	 * no need to kill it.
	 *
	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
	 * a race between the release and opening the device again. We don't want to let
	 * a user open the device while a reset is about to happen.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_list_lock);

	put_pid(hpriv->taskpid);

	if (reset_device) {
		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
	} else {
		/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
		int rc = hdev->asic_funcs->scrub_device_mem(hdev);

		if (rc) {
			dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
			hl_device_reset(hdev, HL_DRV_RESET_HARD);
		}
	}

	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
	 * thread, we don't care because 'in_reset' is marked, so if a user tries to open
	 * the device it will fail on that, even if compute_ctx is false.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_compute_ctx_active = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	hdev->compute_ctx_in_release = 0;

	/* release the eventfd */
	if (hpriv->notifier_event.eventfd)
		eventfd_ctx_put(hpriv->notifier_event.eventfd);

	mutex_destroy(&hpriv->notifier_event.lock);

	kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
{
	kref_get(&hpriv->refcount);
}

int hl_hpriv_put(struct hl_fpriv *hpriv)
{
	return kref_put(&hpriv->refcount, hpriv_release);
}
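/*
 * Note: kref_put() returns 1 only when the last reference was dropped and
 * hpriv_release() ran. hl_device_release() below relies on this to detect
 * that the device is still in use after the FD was closed.
 */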
static void print_device_in_use_info(struct hl_device *hdev, const char *message)
{
	u32 active_cs_num, dmabuf_export_cnt;
	bool unknown_reason = true;
	char buf[128];
	size_t size;
	int offset;

	size = sizeof(buf);
	offset = 0;

	active_cs_num = hl_get_active_cs_num(hdev);
	if (active_cs_num) {
		unknown_reason = false;
		offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
	}

	dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
	if (dmabuf_export_cnt) {
		unknown_reason = false;
		offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
					dmabuf_export_cnt);
	}

	if (unknown_reason)
		scnprintf(buf + offset, size - offset, " [unknown reason]");

	dev_notice(hdev->dev, "%s%s\n", message, buf);
}
/*
 * hl_device_release() - release function for habanalabs device.
 * @ddev: pointer to DRM device structure.
 * @file_priv: pointer to DRM file private data structure.
 *
 * Called when a process closes a habanalabs device
 */
void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = to_hl_device(ddev);

	if (!hdev) {
		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
		put_pid(hpriv->taskpid);
		return;
	}

	hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);

	/* Memory buffers might be still in use at this point and thus the handles IDR destruction
	 * is postponed to hpriv_release().
	 */
	hl_mem_mgr_fini(&hpriv->mem_mgr);

	hdev->compute_ctx_in_release = 1;

	if (!hl_hpriv_put(hpriv)) {
		print_device_in_use_info(hdev, "User process closed FD but device still in use");
		hl_device_reset(hdev, HL_DRV_RESET_HARD);
	}

	hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
}
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev = hpriv->hdev;

	filp->private_data = NULL;

	if (!hdev) {
		pr_err("Closing FD after device was removed\n");
		goto out;
	}

	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
	put_pid(hpriv->taskpid);

	kfree(hpriv);

	return 0;
}
static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	unsigned long vm_pgoff;

	if (!hdev) {
		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
		return -ENODEV;
	}

	vm_pgoff = vma->vm_pgoff;

	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
	case HL_MMAP_TYPE_BLOCK:
		vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
		return hl_hw_block_mmap(hpriv, vma);

	case HL_MMAP_TYPE_CB:
	case HL_MMAP_TYPE_TS_BUFF:
		return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
	}

	return -EINVAL;
}
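/*
 * Note: the mmap offset encodes the mapping type in the high bits of
 * vm_pgoff (selected by HL_MMAP_TYPE_MASK) and the object-specific value in
 * the low bits, which is why the type bits are stripped with
 * HL_MMAP_OFFSET_VALUE_GET() before dispatching to the per-type handler.
 */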
/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on the habanalabs device. Calls the
 * relevant mmap function at the end of the common code.
 */
int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct hl_fpriv *hpriv = file_priv->driver_priv;

	return __hl_mmap(hpriv, vma);
}
static const struct file_operations hl_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open_ctrl,
	.release = hl_device_release_ctrl,
	.unlocked_ioctl = hl_ioctl_control,
	.compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
	kfree(dev);
}
/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @class: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for habanalabs's device.
 */
static int device_init_cdev(struct hl_device *hdev, const struct class *class,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
	if (!*dev)
		return -ENOMEM;

	device_initialize(*dev);
	(*dev)->devt = MKDEV(hdev->major, minor);
	(*dev)->class = class;
	(*dev)->release = device_release_func;
	dev_set_drvdata(*dev, hdev);
	dev_set_name(*dev, "%s", name);

	return 0;
}
static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
{
	const struct class *accel_class = hdev->drm.accel->kdev->class;
	char name[32];
	int rc;

	hdev->cdev_idx = hdev->drm.accel->index;

	/* Initialize cdev and device structures for the control device */
	snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
	rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
				&hdev->cdev_ctrl, &hdev->dev_ctrl);
	if (rc)
		return rc;

	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
	if (rc) {
		dev_err(hdev->dev_ctrl,
			"failed to add an accel control char device to the system\n");
		goto free_ctrl_device;
	}

	rc = hl_sysfs_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize sysfs\n");
		goto delete_ctrl_cdev_device;
	}

	hl_debugfs_add_device(hdev);

	hdev->cdev_sysfs_debugfs_created = true;

	return 0;

delete_ctrl_cdev_device:
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
free_ctrl_device:
	put_device(hdev->dev_ctrl);

	return rc;
}
static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
{
	if (!hdev->cdev_sysfs_debugfs_created)
		return;

	hl_sysfs_fini(hdev);

	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
	put_device(hdev->dev_ctrl);
}
static void device_hard_reset_pending(struct work_struct *work)
{
	struct hl_device_reset_work *device_reset_work =
		container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = device_reset_work->hdev;
	u32 flags;
	int rc;

	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;

	rc = hl_device_reset(hdev, flags);

	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
		struct hl_ctx *ctx = hl_get_compute_ctx(hdev);

		if (ctx) {
			/* One should be subtracted from the read refcount value, because the read
			 * itself is protected with hl_get_compute_ctx().
			 */
			dev_info(hdev->dev,
				"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
				kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
			hl_ctx_put(ctx);
		} else {
			dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
				HL_PENDING_RESET_PER_SEC);
		}

		queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
					msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
	}
}
static void device_release_watchdog_func(struct work_struct *work)
{
	struct hl_device_reset_work *watchdog_work =
		container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = watchdog_work->hdev;
	u32 flags;

	dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");

	flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;

	hl_device_reset(hdev, flags);
}
/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
{
	int i, rc;
	char workq_name[32];

	switch (hdev->asic_type) {
	case ASIC_GOYA:
		goya_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI_SEC:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2B:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI2C:
		gaudi2_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EINVAL;
	}

	rc = hdev->asic_funcs->early_init(hdev);
	if (rc)
		return rc;

	rc = hl_asid_init(hdev);
	if (rc)
		goto early_fini;

	if (hdev->asic_prop.completion_queues_count) {
		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
					sizeof(struct workqueue_struct *),
					GFP_KERNEL);
		if (!hdev->cq_wq) {
			rc = -ENOMEM;
			goto asid_fini;
		}
	}

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
		snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
		if (hdev->cq_wq[i] == NULL) {
			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
			rc = -ENOMEM;
			goto free_cq_wq;
		}
	}

	snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
	hdev->eq_wq = create_singlethread_workqueue(workq_name);
	if (hdev->eq_wq == NULL) {
		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
		rc = -ENOMEM;
		goto free_cq_wq;
	}

	snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
	hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->cs_cmplt_wq) {
		dev_err(hdev->dev,
			"Failed to allocate CS completions workqueue\n");
		rc = -ENOMEM;
		goto free_eq_wq;
	}

	snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
	hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->ts_free_obj_wq) {
		dev_err(hdev->dev,
			"Failed to allocate Timestamp registration free workqueue\n");
		rc = -ENOMEM;
		goto free_cs_cmplt_wq;
	}

	snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
	hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
	if (!hdev->prefetch_wq) {
		dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
		rc = -ENOMEM;
		goto free_ts_free_wq;
	}

	hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
	if (!hdev->hl_chip_info) {
		rc = -ENOMEM;
		goto free_prefetch_wq;
	}

	rc = hl_mmu_if_set_funcs(hdev);
	if (rc)
		goto free_chip_info;

	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);

	snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
	hdev->reset_wq = create_singlethread_workqueue(workq_name);
	if (!hdev->reset_wq) {
		rc = -ENOMEM;
		dev_err(hdev->dev, "Failed to create device reset WQ\n");
		goto free_cb_mgr;
	}

	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
	hdev->device_reset_work.hdev = hdev;
	hdev->device_fini_pending = 0;

	INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
				device_release_watchdog_func);
	hdev->device_release_watchdog_work.hdev = hdev;

	mutex_init(&hdev->send_cpu_message_lock);
	mutex_init(&hdev->debug_lock);
	INIT_LIST_HEAD(&hdev->cs_mirror_list);
	spin_lock_init(&hdev->cs_mirror_lock);
	spin_lock_init(&hdev->reset_info.lock);
	INIT_LIST_HEAD(&hdev->fpriv_list);
	INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
	mutex_init(&hdev->fpriv_list_lock);
	mutex_init(&hdev->fpriv_ctrl_list_lock);
	mutex_init(&hdev->clk_throttling.lock);

	return 0;

free_cb_mgr:
	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
free_chip_info:
	kfree(hdev->hl_chip_info);
free_prefetch_wq:
	destroy_workqueue(hdev->prefetch_wq);
free_ts_free_wq:
	destroy_workqueue(hdev->ts_free_obj_wq);
free_cs_cmplt_wq:
	destroy_workqueue(hdev->cs_cmplt_wq);
free_eq_wq:
	destroy_workqueue(hdev->eq_wq);
free_cq_wq:
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		if (hdev->cq_wq[i])
			destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);
asid_fini:
	hl_asid_fini(hdev);
early_fini:
	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);

	return rc;
}
/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_early_fini(struct hl_device *hdev)
{
	int i;

	mutex_destroy(&hdev->debug_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);

	mutex_destroy(&hdev->fpriv_list_lock);
	mutex_destroy(&hdev->fpriv_ctrl_list_lock);

	mutex_destroy(&hdev->clk_throttling.lock);

	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);

	kfree(hdev->hl_chip_info);

	destroy_workqueue(hdev->prefetch_wq);
	destroy_workqueue(hdev->ts_free_obj_wq);
	destroy_workqueue(hdev->cs_cmplt_wq);
	destroy_workqueue(hdev->eq_wq);
	destroy_workqueue(hdev->reset_wq);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);

	hl_asid_fini(hdev);

	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);
}
static bool is_pci_link_healthy(struct hl_device *hdev)
{
	u16 vendor_id;

	if (!hdev->pdev)
		return false;

	pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);

	return (vendor_id == PCI_VENDOR_ID_HABANALABS);
}
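/*
 * Note: reading the vendor ID is a cheap link probe - config reads on a dead
 * PCI link return all-ones, which can never match PCI_VENDOR_ID_HABANALABS.
 */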
static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!prop->cpucp_info.eq_health_check_supported)
		return 0;

	if (hdev->eq_heartbeat_received) {
		hdev->eq_heartbeat_received = false;
	} else {
		dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
		return -EIO;
	}

	return 0;
}
static void hl_device_heartbeat(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_heartbeat.work);
	struct hl_info_fw_err_info info = {0};
	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;

	/* Start heartbeat checks only after driver has enabled events from FW */
	if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
		goto reschedule;

	/*
	 * For the EQ health check we need to verify that the driver received the
	 * heartbeat EQ event, in order to validate that the EQ is working.
	 * Reschedule only if both the EQ is healthy and we managed to send the
	 * next heartbeat.
	 */
	if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
		goto reschedule;

	if (hl_device_operational(hdev, NULL))
		dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
			is_pci_link_healthy(hdev) ? "healthy" : "broken");

	info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
	info.event_mask = &event_mask;
	hl_handle_fw_err(hdev, &info);
	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);

	return;

reschedule:
	/*
	 * prev_reset_trigger tracks consecutive fatal h/w errors until the first
	 * heartbeat immediately post reset.
	 * If control reached here, then at least one heartbeat work has been
	 * scheduled since the last reset/init cycle.
	 * So if the device is not already in a reset cycle, reset the flag
	 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
	 * status for at least one heartbeat. From this point the driver restarts
	 * tracking future consecutive fatal errors.
	 */
	if (!hdev->reset_info.in_reset)
		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	schedule_delayed_work(&hdev->work_heartbeat,
			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
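/*
 * Note: the heartbeat has two independent legs - an EQ heartbeat event
 * published by the F/W (checked by hl_device_eq_heartbeat_check()) and a
 * CPU-CP heartbeat packet sent by the driver. A failure in either one is
 * treated as a dead device and escalates to a conditional hard reset.
 */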
/*
 * device_late_init - do late initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Do stuff that either needs the device H/W queues to be active or needs
 * to happen after all the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
{
	int rc;

	if (hdev->asic_funcs->late_init) {
		rc = hdev->asic_funcs->late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"failed late initialization for the H/W\n");
			return rc;
		}
	}

	hdev->high_pll = hdev->asic_prop.high_pll;

	if (hdev->heartbeat) {
		/*
		 * Before scheduling the heartbeat, the driver checks whether an EQ
		 * event has been received. For the first schedule we need to set the
		 * indication to true; for the following ones the indication will be
		 * true only if an EQ event was actually sent by the FW.
		 */
		hdev->eq_heartbeat_received = true;

		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);

		schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
	}

	hdev->late_init_done = true;

	return 0;
}
/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_late_fini(struct hl_device *hdev)
{
	if (!hdev->late_init_done)
		return;

	if (hdev->heartbeat)
		cancel_delayed_work_sync(&hdev->work_heartbeat);

	if (hdev->asic_funcs->late_fini)
		hdev->asic_funcs->late_fini(hdev);

	hdev->late_init_done = false;
}
int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
	u64 max_power, curr_power, dc_power, dividend, divisor;
	int rc;

	max_power = hdev->max_power;
	dc_power = hdev->asic_prop.dc_power_default;
	divisor = max_power - dc_power;
	if (!divisor) {
		dev_warn(hdev->dev, "device utilization is not supported\n");
		return -EOPNOTSUPP;
	}

	rc = hl_fw_cpucp_power_get(hdev, &curr_power);
	if (rc)
		return rc;

	curr_power = clamp(curr_power, dc_power, max_power);

	dividend = (curr_power - dc_power) * 100;
	*utilization = (u32) div_u64(dividend, divisor);

	return 0;
}
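/*
 * Worked example (hypothetical numbers): with dc_power = 100W,
 * max_power = 300W and a current reading of 250W, the reported utilization
 * is (250 - 100) * 100 / (300 - 100) = 75%.
 */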
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
	int rc = 0;

	mutex_lock(&hdev->debug_lock);

	if (!enable) {
		if (!hdev->in_debug) {
			dev_err(hdev->dev,
				"Failed to disable debug mode because device was not in debug mode\n");
			rc = -EFAULT;
			goto out;
		}

		if (!hdev->reset_info.hard_reset_pending)
			hdev->asic_funcs->halt_coresight(hdev, ctx);

		hdev->in_debug = 0;

		goto out;
	}

	if (hdev->in_debug) {
		dev_err(hdev->dev,
			"Failed to enable debug mode because device is already in debug mode\n");
		rc = -EFAULT;
		goto out;
	}

	hdev->in_debug = 1;

out:
	mutex_unlock(&hdev->debug_lock);

	return rc;
}
static void take_release_locks(struct hl_device *hdev)
{
	/* Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush processes that are sending message to CPU */
	mutex_lock(&hdev->send_cpu_message_lock);
	mutex_unlock(&hdev->send_cpu_message_lock);

	/* Flush anyone that is inside device open */
	mutex_lock(&hdev->fpriv_list_lock);
	mutex_unlock(&hdev->fpriv_list_lock);
	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
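/*
 * The empty lock/unlock pairs above are intentional: taking and immediately
 * releasing each lock acts as a barrier that flushes out any thread still
 * inside the corresponding critical section before the reset/suspend flow
 * continues.
 */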
static void hl_abort_waiting_for_completions(struct hl_device *hdev)
{
	hl_abort_waiting_for_cs_completions(hdev);

	/* Release all pending user interrupts, each pending user interrupt
	 * holds a reference to a user context.
	 */
	hl_release_pending_user_interrupts(hdev);
}
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
				bool skip_wq_flush)
{
	if (hard_reset)
		device_late_fini(hdev);

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev, skip_wq_flush);

	/* flush the MMU prefetch workqueue */
	flush_workqueue(hdev->prefetch_wq);

	hl_abort_waiting_for_completions(hdev);
}
/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
	int rc;

	pci_save_state(hdev->pdev);

	/* Block future CS/VM/JOB completion operations */
	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		spin_unlock(&hdev->reset_info.lock);
		dev_err(hdev->dev, "Can't suspend while in reset\n");
		return -EIO;
	}
	hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);

	/* This blocks all other stuff that is not blocked by in_reset */
	hdev->disabled = true;

	take_release_locks(hdev);

	rc = hdev->asic_funcs->suspend(hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to disable PCI access of device CPU\n");

	/* Shut down the device */
	pci_disable_device(hdev->pdev);
	pci_set_power_state(hdev->pdev, PCI_D3hot);

	return 0;
}
/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
	int rc;

	pci_set_power_state(hdev->pdev, PCI_D0);
	pci_restore_state(hdev->pdev);
	rc = pci_enable_device_mem(hdev->pdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI device in resume\n");
		return rc;
	}

	pci_set_master(hdev->pdev);

	rc = hdev->asic_funcs->resume(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to resume device after suspend\n");
		goto disable_device;
	}

	/* 'in_reset' was set to true during suspend, now we must clear it in order
	 * for hard reset to be performed
	 */
	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.in_reset = 0;
	spin_unlock(&hdev->reset_info.lock);

	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
	if (rc) {
		dev_err(hdev->dev, "Failed to reset device during resume\n");
		goto disable_device;
	}

	return 0;

disable_device:
	pci_disable_device(hdev->pdev);

	return rc;
}
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
	struct task_struct *task = NULL;
	struct list_head *hpriv_list;
	struct hl_fpriv *hpriv;
	struct mutex *hpriv_lock;
	u32 pending_cnt;

	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	/* Giving time for user to close FD, and for processes that are inside
	 * hl_device_open to finish
	 */
	if (!list_empty(hpriv_list))
		ssleep(1);

	if (timeout) {
		pending_cnt = timeout;
	} else {
		if (hdev->process_kill_trial_cnt) {
			/* Processes have been already killed */
			pending_cnt = 1;
			goto wait_for_processes;
		} else {
			/* Wait a small period after process kill */
			pending_cnt = HL_PENDING_RESET_PER_SEC;
		}
	}

	mutex_lock(hpriv_lock);

	/* This section must be protected because we are dereferencing
	 * pointers that are freed if the process exits
	 */
	list_for_each_entry(hpriv, hpriv_list, dev_node) {
		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
		if (task) {
			dev_info(hdev->dev, "Killing user process pid=%d\n",
				task_pid_nr(task));
			send_sig(SIGKILL, task, 1);
			usleep_range(1000, 10000);

			put_task_struct(task);
		} else {
			dev_dbg(hdev->dev,
				"Can't get task struct for user process %d, process was killed from outside the driver\n",
				pid_nr(hpriv->taskpid));
		}
	}

	mutex_unlock(hpriv_lock);

	/*
	 * We killed the open users, but that doesn't mean they are closed.
	 * It could be that they are running a long cleanup phase in the driver,
	 * e.g. MMU unmappings, or running other long teardown flow even before
	 * our next sleep.
	 * Therefore we need to wait again to make sure they are closed before
	 * continuing with the reset.
	 */

wait_for_processes:
	while ((!list_empty(hpriv_list)) && (pending_cnt)) {
		dev_dbg(hdev->dev,
			"Waiting for all unmap operations to finish before hard reset\n");

		pending_cnt--;

		ssleep(1);
	}

	/* All processes exited successfully */
	if (list_empty(hpriv_list))
		return 0;

	/* Give up waiting for processes to exit */
	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
		return -ETIME;

	hdev->process_kill_trial_cnt++;

	return -EBUSY;
}
static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
	struct list_head *hpriv_list;
	struct hl_fpriv *hpriv;
	struct mutex *hpriv_lock;

	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	mutex_lock(hpriv_lock);
	list_for_each_entry(hpriv, hpriv_list, dev_node)
		hpriv->hdev = NULL;
	mutex_unlock(hpriv_lock);
}
static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
{
	/* If the reset is due to heartbeat, the device CPU is not responsive,
	 * in which case there is no point sending it the PCI disable message.
	 */
	if ((flags & HL_DRV_RESET_HARD) &&
			!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
		/* Disable PCI access from the device F/W so it won't send
		 * us additional interrupts. We disable MSI/MSI-X at
		 * the halt_engines function and we can't have the F/W
		 * sending us interrupts after that. We need to disable
		 * the access here because if the device is marked
		 * disabled, the message won't be sent. Also, in case
		 * of heartbeat, the device CPU is marked as disabled
		 * so this message won't be sent.
		 */
		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
			dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
			return;
		}

		/* verify that the last EQs are handled before disabled is set */
		if (hdev->cpu_queues_enable)
			synchronize_irq(pci_irq_vector(hdev->pdev,
					hdev->asic_prop.eq_interrupt_id));
	}
}
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
	u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	/* No consecutive mechanism when user context exists */
	if (hdev->is_compute_ctx_active)
		return;

	/*
	 * 'reset cause' is being updated here, because getting here
	 * means that it's the 1st time and the last time we're here
	 * ('in_reset' makes sure of it). This makes sure that
	 * 'reset_cause' will continue holding its 1st recorded reason!
	 */
	if (flags & HL_DRV_RESET_HEARTBEAT) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
		cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
	} else if (flags & HL_DRV_RESET_TDR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
		cur_reset_trigger = HL_DRV_RESET_TDR;
	} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
		cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
	} else {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
	}

	/*
	 * If the same reset cause repeats twice, reset_trigger_repeated
	 * is set, and if this reset is due to a fatal FW error the
	 * device is put in an unstable state.
	 */
	if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
		hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
		hdev->reset_info.reset_trigger_repeated = 0;
	} else {
		hdev->reset_info.reset_trigger_repeated = 1;
	}
}
/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: reset flags.
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
	bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
		schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	struct hl_ctx *ctx;
	int i, rc, hw_fini_rc;

	if (!hdev->init_done) {
		dev_err(hdev->dev, "Can't reset before initialization is done\n");
		return 0;
	}

	hard_reset = !!(flags & HL_DRV_RESET_HARD);
	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
	from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
	delay_reset = !!(flags & HL_DRV_RESET_DELAY);
	from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
	reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;

	if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
		dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
		return 0;
	}

	if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
		dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
		hard_reset = true;
	}

	if (reset_upon_device_release) {
		if (hard_reset) {
			dev_crit(hdev->dev,
				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
			return -EINVAL;
		}

		goto do_reset;
	}

	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
		dev_dbg(hdev->dev,
			"asic doesn't allow inference soft reset - do hard-reset instead\n");
		hard_reset = true;
	}

do_reset:
	/* Re-entry of reset thread */
	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
		goto kill_processes;

	/*
	 * Prevent concurrency in this function - only one reset should be
	 * done at any given time. We need to perform this only if we didn't
	 * get here from a dedicated hard reset thread.
	 */
	if (!from_hard_reset_thread) {
		/* Block future CS/VM/JOB completion operations */
		spin_lock(&hdev->reset_info.lock);
		if (hdev->reset_info.in_reset) {
			/* We allow scheduling of a hard reset only during a compute reset */
			if (hard_reset && hdev->reset_info.in_compute_reset)
				hdev->reset_info.hard_reset_schedule_flags = flags;
			spin_unlock(&hdev->reset_info.lock);
			return 0;
		}

		/* This still allows the completion of some KDMA ops.
		 * Update this before in_reset because in_compute_reset implies we are in reset.
		 */
		hdev->reset_info.in_compute_reset = !hard_reset;

		hdev->reset_info.in_reset = 1;

		spin_unlock(&hdev->reset_info.lock);

		/* Cancel the device release watchdog work if required.
		 * In case of reset-upon-device-release while the release watchdog work is
		 * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
		 */
		if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
			struct hl_device_reset_work *watchdog_work =
					&hdev->device_release_watchdog_work;

			hdev->reset_info.watchdog_active = 0;
			if (!from_watchdog_thread)
				cancel_delayed_work_sync(&watchdog_work->reset_work);

			if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
				hdev->reset_info.in_compute_reset = 0;
				flags |= HL_DRV_RESET_HARD;
				flags &= ~HL_DRV_RESET_DEV_RELEASE;
				hard_reset = true;
			}
		}

		if (delay_reset)
			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);

escalate_reset_flow:
		handle_reset_trigger(hdev, flags);
		send_disable_pci_access(hdev, flags);

		/* This also blocks future CS/VM/JOB completion operations */
		hdev->disabled = true;

		take_release_locks(hdev);

		if (hard_reset)
			dev_info(hdev->dev, "Going to reset device\n");
		else if (reset_upon_device_release)
			dev_dbg(hdev->dev, "Going to reset device after release by user\n");
		else
			dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
	}

	if ((hard_reset) && (!from_hard_reset_thread)) {
		hdev->reset_info.hard_reset_pending = true;

		hdev->process_kill_trial_cnt = 0;

		hdev->device_reset_work.flags = flags;

		/*
		 * Because the reset function can't run from heartbeat work,
		 * we need to call the reset function from a dedicated work.
		 */
		queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);

		return 0;
	}

	cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);

kill_processes:
	if (hard_reset) {
		/* Kill processes here after CS rollback. This is because the
		 * process can't really exit until all its CSs are done, which
		 * is what we do in cs rollback
		 */
		rc = device_kill_open_processes(hdev, 0, false);

		if (rc == -EBUSY) {
			if (hdev->device_fini_pending) {
				dev_crit(hdev->dev,
					"%s Failed to kill all open processes, stopping hard reset\n",
					dev_name(&(hdev)->pdev->dev));
				goto out_err;
			}

			/* signal reset thread to reschedule */
			return rc;
		}

		if (rc) {
			dev_crit(hdev->dev,
				"%s Failed to kill all open processes, stopping hard reset\n",
				dev_name(&(hdev)->pdev->dev));
			goto out_err;
		}

		/* Flush the Event queue workers to make sure no other thread is
		 * reading or writing to registers during the reset
		 */
		flush_workqueue(hdev->eq_wq);
	}

	/* Reset the H/W. It will be in idle state after this returns */
	hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);

	if (hard_reset) {
		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;

		/* Release kernel context */
		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
			hdev->kernel_ctx = NULL;

		hl_vm_fini(hdev);
		hl_mmu_fini(hdev);
		hl_eq_reset(hdev, &hdev->event_queue);
	}

	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
	hl_hw_queue_reset(hdev, hard_reset);
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_reset(hdev, &hdev->completion_queue[i]);

	/* Make sure the context switch phase will run again */
	ctx = hl_get_compute_ctx(hdev);
	if (ctx) {
		atomic_set(&ctx->thread_ctx_switch_token, 1);
		ctx->thread_ctx_switch_wait_token = 0;
		hl_ctx_put(ctx);
	}

	if (hw_fini_rc) {
		rc = hw_fini_rc;
		goto out_err;
	}
	/* Finished tear-down, starting to re-initialize */

	if (hard_reset) {
		hdev->device_cpu_disabled = false;
		hdev->reset_info.hard_reset_pending = false;

		if (hdev->reset_info.reset_trigger_repeated &&
				(hdev->reset_info.prev_reset_trigger ==
						HL_DRV_RESET_FW_FATAL_ERR)) {
			/* if there are 2 back-to-back resets from the FW,
			 * ensure the driver puts the device in an unusable state
			 */
			dev_crit(hdev->dev,
				"%s Consecutive FW fatal errors received, stopping hard reset\n",
				dev_name(&(hdev)->pdev->dev));
			rc = -EIO;
			goto out_err;
		}

		if (hdev->kernel_ctx) {
			dev_crit(hdev->dev,
				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
				dev_name(&(hdev)->pdev->dev));
			rc = -EBUSY;
			goto out_err;
		}

		rc = hl_mmu_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to initialize MMU S/W after hard reset\n");
			goto out_err;
		}

		/* Allocate the kernel context */
		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
						GFP_KERNEL);
		if (!hdev->kernel_ctx) {
			rc = -ENOMEM;
			hl_mmu_fini(hdev);
			goto out_err;
		}

		hdev->is_compute_ctx_active = false;

		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init kernel ctx in hard reset\n");
			kfree(hdev->kernel_ctx);
			hdev->kernel_ctx = NULL;
			hl_mmu_fini(hdev);
			goto out_err;
		}
	}

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	/* F/W security enabled indication might be updated after hard-reset */
	if (hard_reset) {
		rc = hl_fw_read_preboot_status(hdev);
		if (rc)
			goto out_err;
	}

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
		goto out_err;
	}

	/* If device is not idle fail the reset process */
	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
						HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
		print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
		rc = -EIO;
		goto out_err;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
		goto out_err;
	}

	if (hard_reset) {
		rc = device_late_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed late init after hard reset\n");
			goto out_err;
		}

		rc = hl_vm_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
			goto out_err;
		}

		if (!hdev->asic_prop.fw_security_enabled)
			hl_fw_set_max_power(hdev);
	} else {
		rc = hdev->asic_funcs->compute_reset_late_init(hdev);
		if (rc) {
			if (reset_upon_device_release)
				dev_err(hdev->dev,
					"Failed late init in reset after device release\n");
			else
				dev_err(hdev->dev, "Failed late init after compute reset\n");
			goto out_err;
		}
	}

	rc = hdev->asic_funcs->scrub_device_mem(hdev);
	if (rc) {
		dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
		goto out_err;
	}

	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.in_compute_reset = 0;

	/* Schedule hard reset only if requested and if not already in hard reset.
	 * We keep 'in_reset' enabled, so no other reset can go in during the hard
	 * reset schedule.
	 */
	if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
		schedule_hard_reset = true;
	else
		hdev->reset_info.in_reset = 0;

	spin_unlock(&hdev->reset_info.lock);

	hdev->reset_info.needs_reset = false;

	if (hard_reset)
		dev_info(hdev->dev,
			"Successfully finished resetting the %s device\n",
			dev_name(&(hdev)->pdev->dev));
	else
		dev_dbg(hdev->dev,
			"Successfully finished resetting the %s device\n",
			dev_name(&(hdev)->pdev->dev));

	if (hard_reset) {
		hdev->reset_info.hard_reset_cnt++;

		/* After reset is done, we are ready to receive events from
		 * the F/W. We can't do it before because we will ignore events
		 * and if those events are fatal, we won't know about it and
		 * the device will be operational although it shouldn't be
		 */
		hdev->asic_funcs->enable_events_from_fw(hdev);
	} else {
		if (!reset_upon_device_release)
			hdev->reset_info.compute_reset_cnt++;

		if (schedule_hard_reset) {
			dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
			flags = hdev->reset_info.hard_reset_schedule_flags;
			hdev->reset_info.hard_reset_schedule_flags = 0;
			hard_reset = true;
			goto escalate_reset_flow;
		}
	}

	return 0;

out_err:
	hdev->disabled = true;

	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.in_compute_reset = 0;

	if (hard_reset) {
		dev_err(hdev->dev,
			"%s Failed to reset! Device is NOT usable\n",
			dev_name(&(hdev)->pdev->dev));
		hdev->reset_info.hard_reset_cnt++;
	} else {
		if (reset_upon_device_release) {
			dev_err(hdev->dev, "Failed to reset device after user release\n");
			flags &= ~HL_DRV_RESET_DEV_RELEASE;
		} else {
			dev_err(hdev->dev, "Failed to do compute reset\n");
			hdev->reset_info.compute_reset_cnt++;
		}

		spin_unlock(&hdev->reset_info.lock);
		flags |= HL_DRV_RESET_HARD;
		hard_reset = true;
		goto escalate_reset_flow;
	}

	hdev->reset_info.in_reset = 0;

	spin_unlock(&hdev->reset_info.lock);

	return rc;
}
/*
 * hl_device_cond_reset() - conditionally reset the device.
 * @hdev: pointer to habanalabs device structure.
 * @flags: reset flags.
 * @event_mask: events to notify user about.
 *
 * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
 * unless another reset precedes it.
 */
int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
	struct hl_ctx *ctx = NULL;

	/* F/W reset cannot be postponed */
	if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
		goto device_reset;

	/* Device release watchdog is relevant only if user exists and gets a reset notification */
	if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
		dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
		goto device_reset;
	}

	ctx = hl_get_compute_ctx(hdev);
	if (!ctx)
		goto device_reset;

	/*
	 * There is no point in postponing the reset if the user is not registered for events.
	 * However, if no eventfd_ctx exists but the device release watchdog is already scheduled,
	 * it just implies that the user has unregistered as part of handling a previous event. In
	 * this case an immediate reset is not required.
	 */
	if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)
		goto device_reset;

	/* Schedule the device release watchdog work unless reset is already in progress or if the
	 * work is already scheduled.
	 */
	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		spin_unlock(&hdev->reset_info.lock);
		goto device_reset;
	}

	if (hdev->reset_info.watchdog_active) {
		hdev->device_release_watchdog_work.flags |= flags;
		goto out;
	}

	hdev->device_release_watchdog_work.flags = flags;
	dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
		hdev->device_release_watchdog_timeout_sec);
	schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
				msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
	hdev->reset_info.watchdog_active = 1;
out:
	spin_unlock(&hdev->reset_info.lock);

	hl_notifier_event_send_all(hdev, event_mask);

	hl_ctx_put(ctx);

	hl_abort_waiting_for_completions(hdev);

	return 0;

device_reset:
	if (event_mask)
		hl_notifier_event_send_all(hdev, event_mask);

	if (ctx)
		hl_ctx_put(ctx);

	return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);
}
static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
{
	mutex_lock(&notifier_event->lock);
	notifier_event->events_mask |= event_mask;

	if (notifier_event->eventfd)
		eventfd_signal(notifier_event->eventfd);

	mutex_unlock(&notifier_event->lock);
}
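/*
 * Userspace sketch (hypothetical, not part of the driver): a process that
 * registered an eventfd is woken by the eventfd_signal() above and can then
 * fetch the accumulated events mask, e.g.:
 *
 *	u64 cnt;
 *	read(eventfd, &cnt, sizeof(cnt));	// returns once events arrive
 */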
/*
 * hl_notifier_event_send_all - notify all user processes via eventfd
 *
 * @hdev: pointer to habanalabs device structure
 * @event_mask: the occurred event/s
 */
void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
{
	struct hl_fpriv *hpriv;

	if (!event_mask) {
		dev_warn(hdev->dev, "Skip sending zero event");
		return;
	}

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
		hl_notifier_event_send(&hpriv->notifier_event, event_mask);

	mutex_unlock(&hdev->fpriv_list_lock);
}
2082 * hl_device_init - main initialization function for habanalabs device
2084 * @hdev: pointer to habanalabs device structure
2086 * Allocate an id for the device, do early initialization and then call the
2087 * ASIC specific initialization functions. Finally, create the cdev and the
2088 * Linux device to expose it to the user
2090 int hl_device_init(struct hl_device *hdev)
2092 int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
2093 struct hl_ts_free_jobs *free_jobs_data;
2094 bool expose_interfaces_on_err = false;
2097 /* Initialize ASIC function pointers and perform early init */
2098 rc = device_early_init(hdev);
2102 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2103 hdev->asic_prop.user_interrupt_count;
2105 if (user_interrupt_cnt) {
2106 hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
2108 if (!hdev->user_interrupt) {
2113 /* Timestamp records supported only if CQ supported in device */
2114 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2115 for (i = 0 ; i < user_interrupt_cnt ; i++) {
2116 p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2117 sizeof(struct timestamp_reg_free_node));
2120 goto free_usr_intr_mem;
2122 free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;
2123 free_jobs_data->free_nodes_pool = p;
2124 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2125 free_jobs_data->next_avail_free_node_idx = 0;
2130 free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;
2131 p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2132 sizeof(struct timestamp_reg_free_node));
2135 goto free_usr_intr_mem;
2138 free_jobs_data->free_nodes_pool = p;
2139 free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2140 free_jobs_data->next_avail_free_node_idx = 0;
2143 * Start calling ASIC initialization. First S/W then H/W and finally
2146 rc = hdev->asic_funcs->sw_init(hdev);
2148 goto free_common_usr_intr_mem;
2151 /* initialize completion structure for multi CS wait */
2152 hl_multi_cs_completion_init(hdev);
2155 * Initialize the H/W queues. Must be done before hw_init, because
2156 * there the addresses of the kernel queue are being written to the
2157 * registers of the device
2159 rc = hl_hw_queues_create(hdev);
2161 dev_err(hdev->dev, "failed to initialize kernel queues\n");
2165 cq_cnt = hdev->asic_prop.completion_queues_count;
2168 * Initialize the completion queues. Must be done before hw_init,
2169 * because there the addresses of the completion queues are being
2170 * passed as arguments to request_irq
2173 hdev->completion_queue = kcalloc(cq_cnt,
2174 sizeof(*hdev->completion_queue),
2177 if (!hdev->completion_queue) {
2179 "failed to allocate completion queues\n");
2181 goto hw_queues_destroy;
2185 for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
2186 rc = hl_cq_init(hdev, &hdev->completion_queue[i],
2187 hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
2190 "failed to initialize completion queue\n");
2193 hdev->completion_queue[i].cq_idx = i;
2196 hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
2197 sizeof(struct hl_cs *), GFP_KERNEL);
2198 if (!hdev->shadow_cs_queue) {
2204 * Initialize the event queue. Must be done before hw_init,
2205 * because there the address of the event queue is being
2206 * passed as argument to request_irq
2208 rc = hl_eq_init(hdev, &hdev->event_queue);
2210 dev_err(hdev->dev, "failed to initialize event queue\n");
2211 goto free_shadow_cs_queue;
2214 /* MMU S/W must be initialized before kernel context is created */
2215 rc = hl_mmu_init(hdev);
2217 dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
2221 /* Allocate the kernel context */
2222 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
2223 if (!hdev->kernel_ctx) {
2228 hdev->is_compute_ctx_active = false;
2230 hdev->asic_funcs->state_dump_init(hdev);
2232 hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
2234 hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
2236 rc = hl_debugfs_device_init(hdev);
2238 dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
2239 kfree(hdev->kernel_ctx);
2243 /* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
2244 * hl_debugfs_device_init().
2246 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
2248 dev_err(hdev->dev, "failed to initialize kernel context\n");
2249 kfree(hdev->kernel_ctx);
2250 goto debugfs_device_fini;
2253 rc = hl_cb_pool_init(hdev);
2255 dev_err(hdev->dev, "failed to initialize CB pool\n");
2259 rc = hl_dec_init(hdev);
2261 dev_err(hdev->dev, "Failed to initialize the decoder module\n");
2266 * From this point, override rc (=0) in case of an error to allow debugging
2267 * (by adding char devices and creating sysfs/debugfs files as part of the error flow).
2269 expose_interfaces_on_err = true;
2271 /* Device is enabled at this point because part of the initialization
2272 * requires communication with the device firmware to get information
2273 * that is required for the initialization itself
2275 hdev->disabled = false;
2277 rc = hdev->asic_funcs->hw_init(hdev);
2279 dev_err(hdev->dev, "failed to initialize the H/W\n");
2284 /* Check that the communication with the device is working */
2285 rc = hdev->asic_funcs->test_queues(hdev);
2287 dev_err(hdev->dev, "Failed to detect if device is alive\n");
2292 rc = device_late_init(hdev);
2294 dev_err(hdev->dev, "Failed late initialization\n");
2299 dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
2301 hdev->asic_prop.dram_size / SZ_1G);
2303 rc = hl_vm_init(hdev);
2305 dev_err(hdev->dev, "Failed to initialize memory module\n");
2311 * Expose devices and sysfs/debugfs files to user.
2312 * From here there is no need to expose them in case of an error.
2314 expose_interfaces_on_err = false;
2316 rc = drm_dev_register(&hdev->drm, 0);
2318 dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);
2323 rc = cdev_sysfs_debugfs_add(hdev);
2325 dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
2330 /* Need to call this again because the max power might change,
2331 * depending on card type for certain ASICs
2333 if (hdev->asic_prop.set_max_power_on_device_init &&
2334 !hdev->asic_prop.fw_security_enabled)
2335 hl_fw_set_max_power(hdev);
2338 * hl_hwmon_init() must be called after device_late_init(), because only
2339 * there we get the information from the device about which
2340 * hwmon-related sensors the device supports.
2341 * Furthermore, it must be done after adding the device to the system.
2343 rc = hl_hwmon_init(hdev);
2345 dev_err(hdev->dev, "Failed to initialize hwmon\n");
2350 dev_notice(hdev->dev,
2351 "Successfully added device %s to habanalabs driver\n",
2352 dev_name(&(hdev)->pdev->dev));
2354 /* After initialization is done, we are ready to receive events from
2355 * the F/W. We can't do it earlier because we would ignore events and, if
2356 * those events are fatal, we won't know about them and the device will
2357 * appear operational although it shouldn't be
2359 hdev->asic_funcs->enable_events_from_fw(hdev);
2361 hdev->init_done = true;
2366 hl_cb_pool_fini(hdev);
2368 if (hl_ctx_put(hdev->kernel_ctx) != 1)
2370 "kernel ctx is still alive on initialization failure\n");
2371 debugfs_device_fini:
2372 hl_debugfs_device_fini(hdev);
2376 hl_eq_fini(hdev, &hdev->event_queue);
2377 free_shadow_cs_queue:
2378 kfree(hdev->shadow_cs_queue);
2380 for (i = 0 ; i < cq_ready_cnt ; i++)
2381 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2382 kfree(hdev->completion_queue);
2384 hl_hw_queues_destroy(hdev);
2386 hdev->asic_funcs->sw_fini(hdev);
2387 free_common_usr_intr_mem:
2388 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2390 if (user_interrupt_cnt) {
2391 for (i = 0 ; i < user_interrupt_cnt ; i++) {
2392 if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)
2394 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2396 kfree(hdev->user_interrupt);
2399 device_early_fini(hdev);
2401 hdev->disabled = true;
2402 if (expose_interfaces_on_err) {
2403 drm_dev_register(&hdev->drm, 0);
2404 cdev_sysfs_debugfs_add(hdev);
2407 pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",
2408 hdev->cdev_idx, dev_name(&hdev->pdev->dev));
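/*
 * The error path above follows an "expose interfaces on error" idiom: once
 * hardware bring-up has started, even a failed init still registers the DRM
 * device and the char/sysfs/debugfs entries so the broken card can be
 * examined from user space. A distilled, hypothetical form of that flow:
 */
static void hl_expose_on_err_example(struct hl_device *hdev, bool expose)
{
        if (!expose)
                return;

        /* Registration errors are deliberately ignored on this path */
        drm_dev_register(&hdev->drm, 0);
        cdev_sysfs_debugfs_add(hdev);
}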
2414 * hl_device_fini - main tear-down function for habanalabs device
2416 * @hdev: pointer to habanalabs device structure
2418 * Destroy the device, call ASIC fini functions and release the id
2420 void hl_device_fini(struct hl_device *hdev)
2422 u32 user_interrupt_cnt;
2423 bool device_in_reset;
2428 dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));
2430 hdev->device_fini_pending = 1;
2431 flush_delayed_work(&hdev->device_reset_work.reset_work);
2434 reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
2436 reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
2439 * This function competes with the reset function, so try to take the
2440 * reset atomic flag; if we are already in the middle of a reset, wait
2441 * until the reset function finishes. The reset function is designed to
2442 * always finish. However, in Gaudi, because of all the network ports,
2443 * a hard reset can take 10-30 seconds
2446 timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
2448 spin_lock(&hdev->reset_info.lock);
2449 device_in_reset = !!hdev->reset_info.in_reset;
2450 if (!device_in_reset)
2451 hdev->reset_info.in_reset = 1;
2452 spin_unlock(&hdev->reset_info.lock);
2454 while (device_in_reset) {
2455 usleep_range(50, 200);
2457 spin_lock(&hdev->reset_info.lock);
2458 device_in_reset = !!hdev->reset_info.in_reset;
2459 if (!device_in_reset)
2460 hdev->reset_info.in_reset = 1;
2461 spin_unlock(&hdev->reset_info.lock);
2463 if (ktime_compare(ktime_get(), timeout) > 0) {
2465 "%s Failed to remove device because reset function did not finish\n",
2466 dev_name(&(hdev)->pdev->dev));
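/*
 * Hypothetical condensation of the claim-or-wait loop above: atomically
 * take reset_info.in_reset under the spinlock, or poll until the concurrent
 * reset releases it. Returns true if the flag was claimed before @timeout.
 */
static bool hl_claim_reset_flag_example(struct hl_device *hdev, ktime_t timeout)
{
        bool claimed;

        do {
                spin_lock(&hdev->reset_info.lock);
                claimed = !hdev->reset_info.in_reset;
                if (claimed)
                        hdev->reset_info.in_reset = 1;
                spin_unlock(&hdev->reset_info.lock);

                if (claimed)
                        return true;

                usleep_range(50, 200);
        } while (ktime_compare(ktime_get(), timeout) <= 0);

        return false;
}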
2471 cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
2473 /* Disable PCI access from device F/W so it won't send us additional
2474 * interrupts. We disable MSI/MSI-X at the halt_engines function and we
2475 * can't have the F/W sending us interrupts after that. We need to
2476 * disable the access here because if the device is marked disabled, the
2477 * message won't be sent. Also, in case of heartbeat, the device CPU is
2478 * marked as disabled so this message won't be sent
2480 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2482 /* Mark device as disabled */
2483 hdev->disabled = true;
2485 take_release_locks(hdev);
2487 hdev->reset_info.hard_reset_pending = true;
2489 hl_hwmon_fini(hdev);
2491 cleanup_resources(hdev, true, false, false);
2493 /* Kill processes here after CS rollback. This is because the process
2494 * can't really exit until all its CSs are done, which is what we do in
2495 * CS rollback
2496 */
2497 dev_info(hdev->dev,
2498 "Waiting for all processes to exit (timeout of %u seconds)",
2499 HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
2501 hdev->process_kill_trial_cnt = 0;
2502 rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
2504 dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);
2505 device_disable_open_processes(hdev, false);
2508 hdev->process_kill_trial_cnt = 0;
2509 rc = device_kill_open_processes(hdev, 0, true);
2511 dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);
2512 device_disable_open_processes(hdev, true);
2515 hl_cb_pool_fini(hdev);
2517 /* Reset the H/W. It will be in idle state after this returns */
2518 rc = hdev->asic_funcs->hw_fini(hdev, true, false);
2520 dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
2522 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
2524 /* Release kernel context */
2525 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
2526 dev_err(hdev->dev, "kernel ctx is still alive\n");
2534 vfree(hdev->captured_err_info.page_fault_info.user_mappings);
2536 hl_eq_fini(hdev, &hdev->event_queue);
2538 kfree(hdev->shadow_cs_queue);
2540 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2541 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2542 kfree(hdev->completion_queue);
2544 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2545 hdev->asic_prop.user_interrupt_count;
2547 if (user_interrupt_cnt) {
2548 if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2549 for (i = 0 ; i < user_interrupt_cnt ; i++)
2550 vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2553 kfree(hdev->user_interrupt);
2556 vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2558 hl_hw_queues_destroy(hdev);
2560 /* Call ASIC S/W finalize function */
2561 hdev->asic_funcs->sw_fini(hdev);
2563 device_early_fini(hdev);
2565 /* Hide devices and sysfs/debugfs files from user */
2566 cdev_sysfs_debugfs_remove(hdev);
2567 drm_dev_unregister(&hdev->drm);
2569 hl_debugfs_device_fini(hdev);
2571 pr_info("removed device successfully\n");
2575 * MMIO register access helper functions.
2579 * hl_rreg - Read an MMIO register
2581 * @hdev: pointer to habanalabs device structure
2582 * @reg: MMIO register offset (in bytes)
2584 * Returns the value of the MMIO register we are asked to read
2587 inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
2589 u32 val = readl(hdev->rmmio + reg);
2591 if (unlikely(trace_habanalabs_rreg32_enabled()))
2592 trace_habanalabs_rreg32(hdev->dev, reg, val);
2598 * hl_wreg - Write to an MMIO register
2600 * @hdev: pointer to habanalabs device structure
2601 * @reg: MMIO register offset (in bytes)
2602 * @val: 32-bit value
2604 * Writes the 32-bit value into the MMIO register
2607 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
2609 if (unlikely(trace_habanalabs_wreg32_enabled()))
2610 trace_habanalabs_wreg32(hdev->dev, reg, val);
2612 writel(val, hdev->rmmio + reg);
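/*
 * Example composition of the two accessors above into a read-modify-write
 * of a register field (illustrative helper, analogous in spirit to the
 * RMWREG32-style macros in habanalabs.h):
 */
static inline void hl_rmw_reg_example(struct hl_device *hdev, u32 reg,
                                        u32 mask, u32 val)
{
        u32 tmp = hl_rreg(hdev, reg);

        tmp &= ~mask;           /* clear the field */
        tmp |= val & mask;      /* install the new value */
        hl_wreg(hdev, reg, tmp);
}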
2615 void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2618 struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
2620 if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
2622 "Number of possible razwi initiators (%u) exceeded limit (%u)\n",
2623 num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
2627 /* In case it's the first razwi since the device was opened, capture its parameters */
2628 if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
2631 razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
2632 razwi_info->razwi.addr = addr;
2633 razwi_info->razwi.num_of_possible_engines = num_of_engines;
2634 memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
2635 num_of_engines * sizeof(u16));
2636 razwi_info->razwi.flags = flags;
2638 razwi_info->razwi_info_available = true;
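/*
 * The atomic_cmpxchg() above implements a "record only the first error"
 * idiom that repeats below for page faults, HW, FW and engine errors. A
 * minimal, hypothetical sketch of the pattern:
 */
static bool hl_capture_once_example(atomic_t *detected)
{
        /* Only the first caller sees the 0 -> 1 transition; racers back off */
        return atomic_cmpxchg(detected, 0, 1) == 0;
}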
2641 void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2642 u8 flags, u64 *event_mask)
2644 hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
2647 *event_mask |= HL_NOTIFIER_EVENT_RAZWI;
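/*
 * Hypothetical caller-side sketch: an error handler captures the razwi
 * details, accumulates notifier bits in a local mask and broadcasts once at
 * the end via the send-all helper (the zero flags value here is only a
 * placeholder, not a real HL_RAZWI_* flag):
 */
static void razwi_report_example(struct hl_device *hdev, u64 addr, u16 eng_id)
{
        u64 event_mask = 0;

        hl_handle_razwi(hdev, addr, &eng_id, 1, 0, &event_mask);

        if (event_mask)
                hl_notifier_event_send_all(hdev, event_mask);
}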
2650 static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
2652 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2653 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2654 struct hl_vm_hash_node *hnode;
2655 struct hl_userptr *userptr;
2656 enum vm_type *vm_type;
2661 /* Reset previous session count */
2662 pgf_info->num_of_user_mappings = 0;
2664 ctx = hl_get_compute_ctx(hdev);
2666 dev_err(hdev->dev, "Can't get user context for user mappings\n");
2670 mutex_lock(&ctx->mem_hash_lock);
2671 hash_for_each(ctx->mem_hash, i, hnode, node) {
2672 vm_type = hnode->ptr;
2673 if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
2674 ((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
2675 pgf_info->num_of_user_mappings++;
2679 if (!pgf_info->num_of_user_mappings)
2682 /* If a buffer was already allocated in a previous session, release it
2683 * before allocating a new one.
2685 vfree(pgf_info->user_mappings);
2686 pgf_info->user_mappings =
2687 vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
2688 if (!pgf_info->user_mappings) {
2689 pgf_info->num_of_user_mappings = 0;
2693 hash_for_each(ctx->mem_hash, i, hnode, node) {
2694 vm_type = hnode->ptr;
2695 if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
2696 userptr = hnode->ptr;
2697 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2698 pgf_info->user_mappings[map_idx].size = userptr->size;
2700 } else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
2701 phys_pg_pack = hnode->ptr;
2702 pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2703 pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
2708 mutex_unlock(&ctx->mem_hash_lock);
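/*
 * hl_get_compute_ctx() above returns the context with a reference held, so
 * every walk over ctx->mem_hash must be bracketed by a matching release. A
 * hypothetical sketch of the required pairing:
 */
static void ctx_walk_example(struct hl_device *hdev)
{
        struct hl_ctx *ctx = hl_get_compute_ctx(hdev);

        if (!ctx)
                return;

        mutex_lock(&ctx->mem_hash_lock);
        /* ... inspect ctx->mem_hash entries ... */
        mutex_unlock(&ctx->mem_hash_lock);

        hl_ctx_put(ctx);        /* drop the reference taken above */
}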
2712 void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
2714 struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2716 /* Capture only the first page fault */
2717 if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
2720 pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
2721 pgf_info->page_fault.addr = addr;
2722 pgf_info->page_fault.engine_id = eng_id;
2723 hl_capture_user_mappings(hdev, is_pmmu);
2725 pgf_info->page_fault_info_available = true;
2728 void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
2731 hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
2734 *event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
2737 static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
2739 struct hw_err_info *info = &hdev->captured_err_info.hw_err;
2741 /* Capture only the first HW err */
2742 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2745 info->event.timestamp = ktime_to_ns(ktime_get());
2746 info->event.event_id = event_id;
2748 info->event_info_available = true;
2751 void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
2753 hl_capture_hw_err(hdev, event_id);
2756 *event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
2759 static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
2761 struct fw_err_info *info = &hdev->captured_err_info.fw_err;
2763 /* Capture only the first FW error */
2764 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2767 info->event.timestamp = ktime_to_ns(ktime_get());
2768 info->event.err_type = fw_info->err_type;
2769 if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
2770 info->event.event_id = fw_info->event_id;
2772 info->event_info_available = true;
2775 void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
2777 hl_capture_fw_err(hdev, info);
2779 if (info->event_mask)
2780 *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
2783 void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)
2785 struct engine_err_info *info = &hdev->captured_err_info.engine_err;
2787 /* Capture only the first engine error */
2788 if (atomic_cmpxchg(&info->event_detected, 0, 1))
2791 info->event.timestamp = ktime_to_ns(ktime_get());
2792 info->event.engine_id = engine_id;
2793 info->event.error_count = error_count;
2794 info->event_info_available = true;
2797 void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
2799 vfree(captured_err_info->page_fault_info.user_mappings);
2800 memset(captured_err_info, 0, sizeof(struct hl_error_info));
2801 atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
2802 captured_err_info->undef_opcode.write_enable = true;
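/*
 * Hypothetical usage sketch: a reset flow would clear the records captured
 * since the device was opened and re-arm capturing, e.g.:
 */
static void reset_err_capture_example(struct hl_device *hdev)
{
        /* Frees the old user-mappings buffer and re-enables all captures */
        hl_enable_err_info_capture(&hdev->captured_err_info);
}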