// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/rtas.h>
22 struct list_head removed_vf_list;
23 int removed_dev_count;
26 static int eeh_result_priority(enum pci_ers_result result)
29 case PCI_ERS_RESULT_NONE:
31 case PCI_ERS_RESULT_NO_AER_DRIVER:
33 case PCI_ERS_RESULT_RECOVERED:
35 case PCI_ERS_RESULT_CAN_RECOVER:
37 case PCI_ERS_RESULT_DISCONNECT:
39 case PCI_ERS_RESULT_NEED_RESET:
42 WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", result);
47 static const char *pci_ers_result_name(enum pci_ers_result result)
50 case PCI_ERS_RESULT_NONE:
52 case PCI_ERS_RESULT_CAN_RECOVER:
54 case PCI_ERS_RESULT_NEED_RESET:
56 case PCI_ERS_RESULT_DISCONNECT:
58 case PCI_ERS_RESULT_RECOVERED:
60 case PCI_ERS_RESULT_NO_AER_DRIVER:
61 return "no AER driver";
63 WARN_ONCE(1, "Unknown result type: %d\n", result);
68 static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
69 enum pci_ers_result new)
71 if (eeh_result_priority(new) > eeh_result_priority(old))
76 static bool eeh_dev_removed(struct eeh_dev *edev)
78 return !edev || (edev->mode & EEH_DEV_REMOVED);
81 static bool eeh_edev_actionable(struct eeh_dev *edev)
85 if (edev->pdev->error_state == pci_channel_io_perm_failure)
87 if (eeh_dev_removed(edev))
89 if (eeh_pe_passed(edev->pe))
96 * eeh_pcid_get - Get the PCI device driver
99 * The function is used to retrieve the PCI device driver for
100 * the indicated PCI device. Besides, we will increase the reference
101 * of the PCI device driver to prevent that being unloaded on
102 * the fly. Otherwise, kernel crash would be seen.
104 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
106 if (!pdev || !pdev->dev.driver)
109 if (!try_module_get(pdev->dev.driver->owner))
112 return to_pci_driver(pdev->dev.driver);
116 * eeh_pcid_put - Dereference on the PCI device driver
119 * The function is called to do dereference on the PCI device
120 * driver of the indicated PCI device.
122 static inline void eeh_pcid_put(struct pci_dev *pdev)
124 if (!pdev || !pdev->dev.driver)
127 module_put(pdev->dev.driver->owner);
131 * eeh_disable_irq - Disable interrupt for the recovering device
134 * This routine must be called when reporting temporary or permanent
135 * error to the particular PCI device to disable interrupt of that
136 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
137 * do real work because EEH should freeze DMA transfers for those PCI
138 * devices encountering EEH errors, which includes MSI or MSI-X.
140 static void eeh_disable_irq(struct eeh_dev *edev)
142 /* Don't disable MSI and MSI-X interrupts. They are
143 * effectively disabled by the DMA Stopped state
144 * when an EEH error occurs.
146 if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
149 if (!irq_has_action(edev->pdev->irq))
152 edev->mode |= EEH_DEV_IRQ_DISABLED;
153 disable_irq_nosync(edev->pdev->irq);
157 * eeh_enable_irq - Enable interrupt for the recovering device
160 * This routine must be called to enable interrupt while failed
161 * device could be resumed.
163 static void eeh_enable_irq(struct eeh_dev *edev)
165 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
166 edev->mode &= ~EEH_DEV_IRQ_DISABLED;
170 * This is just ass backwards. This maze has
171 * unbalanced irq_enable/disable calls. So instead of
172 * finding the root cause it works around the warning
173 * in the irq_enable code by conditionally calling
176 * That's just wrong.The warning in the core code is
177 * there to tell people to fix their asymmetries in
178 * their own code, not by abusing the core information
181 * I so wish that the assymetry would be the other way
182 * round and a few more irq_disable calls render that
183 * shit unusable forever.
187 if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
188 enable_irq(edev->pdev->irq);
192 static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
194 struct pci_dev *pdev;
200 * We cannot access the config space on some adapters.
201 * Otherwise, it will cause fenced PHB. We don't save
202 * the content in their config space and will restore
203 * from the initial config space saved when the EEH
206 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
209 pdev = eeh_dev_to_pci_dev(edev);
213 pci_save_state(pdev);
216 static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
219 struct eeh_dev *edev, *tmp;
221 eeh_for_each_pe(root, pe)
222 eeh_pe_for_each_dev(pe, edev, tmp)
223 if (eeh_edev_actionable(edev))
224 edev->pdev->error_state = s;
227 static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
230 struct eeh_dev *edev, *tmp;
232 eeh_for_each_pe(root, pe) {
233 eeh_pe_for_each_dev(pe, edev, tmp) {
234 if (!eeh_edev_actionable(edev))
237 if (!eeh_pcid_get(edev->pdev))
241 eeh_enable_irq(edev);
243 eeh_disable_irq(edev);
245 eeh_pcid_put(edev->pdev);
250 typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
252 struct pci_driver *);
253 static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
254 enum pci_ers_result *result)
256 struct pci_dev *pdev;
257 struct pci_driver *driver;
258 enum pci_ers_result new_result;
260 pci_lock_rescan_remove();
263 get_device(&pdev->dev);
264 pci_unlock_rescan_remove();
266 eeh_edev_info(edev, "no device");
269 device_lock(&pdev->dev);
270 if (eeh_edev_actionable(edev)) {
271 driver = eeh_pcid_get(pdev);
274 eeh_edev_info(edev, "no driver");
275 else if (!driver->err_handler)
276 eeh_edev_info(edev, "driver not EEH aware");
277 else if (edev->mode & EEH_DEV_NO_HANDLER)
278 eeh_edev_info(edev, "driver bound too late");
280 new_result = fn(edev, pdev, driver);
281 eeh_edev_info(edev, "%s driver reports: '%s'",
283 pci_ers_result_name(new_result));
285 *result = pci_ers_merge_result(*result,
291 eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
292 !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
294 device_unlock(&pdev->dev);
295 if (edev->pdev != pdev)
296 eeh_edev_warn(edev, "Device changed during processing!\n");
297 put_device(&pdev->dev);
300 static void eeh_pe_report(const char *name, struct eeh_pe *root,
301 eeh_report_fn fn, enum pci_ers_result *result)
304 struct eeh_dev *edev, *tmp;
306 pr_info("EEH: Beginning: '%s'\n", name);
307 eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
308 eeh_pe_report_edev(edev, fn, result);
310 pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
311 name, pci_ers_result_name(*result));
313 pr_info("EEH: Finished:'%s'", name);
317 * eeh_report_error - Report pci error to each device driver
319 * @driver: device's PCI driver
321 * Report an EEH error to each device driver.
323 static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
324 struct pci_dev *pdev,
325 struct pci_driver *driver)
327 enum pci_ers_result rc;
329 if (!driver->err_handler->error_detected)
330 return PCI_ERS_RESULT_NONE;
332 eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
334 rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);
336 edev->in_error = true;
337 pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
342 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
344 * @driver: device's PCI driver
346 * Tells each device driver that IO ports, MMIO and config space I/O
349 static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
350 struct pci_dev *pdev,
351 struct pci_driver *driver)
353 if (!driver->err_handler->mmio_enabled)
354 return PCI_ERS_RESULT_NONE;
355 eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
356 return driver->err_handler->mmio_enabled(pdev);
360 * eeh_report_reset - Tell device that slot has been reset
362 * @driver: device's PCI driver
364 * This routine must be called while EEH tries to reset particular
365 * PCI device so that the associated PCI device driver could take
366 * some actions, usually to save data the driver needs so that the
367 * driver can work again while the device is recovered.
369 static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
370 struct pci_dev *pdev,
371 struct pci_driver *driver)
373 if (!driver->err_handler->slot_reset || !edev->in_error)
374 return PCI_ERS_RESULT_NONE;
375 eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
376 return driver->err_handler->slot_reset(pdev);
379 static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
381 struct pci_dev *pdev;
387 * The content in the config space isn't saved because
388 * the blocked config space on some adapters. We have
389 * to restore the initial saved config space when the
390 * EEH device is created.
392 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
393 if (list_is_last(&edev->entry, &edev->pe->edevs))
394 eeh_pe_restore_bars(edev->pe);
399 pdev = eeh_dev_to_pci_dev(edev);
403 pci_restore_state(pdev);
407 * eeh_report_resume - Tell device to resume normal operations
409 * @driver: device's PCI driver
411 * This routine must be called to notify the device driver that it
412 * could resume so that the device driver can do some initialization
413 * to make the recovered device work again.
415 static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
416 struct pci_dev *pdev,
417 struct pci_driver *driver)
419 if (!driver->err_handler->resume || !edev->in_error)
420 return PCI_ERS_RESULT_NONE;
422 eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
423 driver->err_handler->resume(pdev);
425 pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
426 #ifdef CONFIG_PCI_IOV
427 if (eeh_ops->notify_resume)
428 eeh_ops->notify_resume(edev);
430 return PCI_ERS_RESULT_NONE;
434 * eeh_report_failure - Tell device driver that device is dead.
436 * @driver: device's PCI driver
438 * This informs the device driver that the device is permanently
439 * dead, and that no further recovery attempts will be made on it.
441 static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
442 struct pci_dev *pdev,
443 struct pci_driver *driver)
445 enum pci_ers_result rc;
447 if (!driver->err_handler->error_detected)
448 return PCI_ERS_RESULT_NONE;
450 eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
452 rc = driver->err_handler->error_detected(pdev,
453 pci_channel_io_perm_failure);
455 pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
459 static void *eeh_add_virt_device(struct eeh_dev *edev)
461 struct pci_driver *driver;
462 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
464 if (!(edev->physfn)) {
465 eeh_edev_warn(edev, "Not for VF\n");
469 driver = eeh_pcid_get(dev);
471 if (driver->err_handler) {
478 #ifdef CONFIG_PCI_IOV
479 pci_iov_add_virtfn(edev->physfn, edev->vf_index);
484 static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
486 struct pci_driver *driver;
487 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
488 struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
491 * Actually, we should remove the PCI bridges as well.
492 * However, that's lots of complexity to do that,
493 * particularly some of devices under the bridge might
494 * support EEH. So we just care about PCI devices for
497 if (!eeh_edev_actionable(edev) ||
498 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
502 driver = eeh_pcid_get(dev);
504 if (driver->err_handler &&
505 driver->err_handler->error_detected &&
506 driver->err_handler->slot_reset) {
514 /* Remove it from PCI subsystem */
515 pr_info("EEH: Removing %s without EEH sensitive driver\n",
517 edev->mode |= EEH_DEV_DISCONNECTED;
519 rmv_data->removed_dev_count++;
522 #ifdef CONFIG_PCI_IOV
523 pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
527 list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
529 pci_lock_rescan_remove();
530 pci_stop_and_remove_bus_device(dev);
531 pci_unlock_rescan_remove();
535 static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
537 struct eeh_dev *edev, *tmp;
539 eeh_pe_for_each_dev(pe, edev, tmp) {
540 if (!(edev->mode & EEH_DEV_DISCONNECTED))
543 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
544 eeh_pe_tree_remove(edev);
551 * Explicitly clear PE's frozen state for PowerNV where
552 * we have frozen PE until BAR restore is completed. It's
553 * harmless to clear it for pSeries. To be consistent with
554 * PE reset (for 3 times), we try to clear the frozen state
555 * for 3 times as well.
557 static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
562 eeh_for_each_pe(root, pe) {
563 if (include_passed || !eeh_pe_passed(pe)) {
564 for (i = 0; i < 3; i++)
565 if (!eeh_unfreeze_pe(pe))
571 eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
575 int eeh_pe_reset_and_recover(struct eeh_pe *pe)
579 /* Bail if the PE is being recovered */
580 if (pe->state & EEH_PE_RECOVERING)
583 /* Put the PE into recovery mode */
584 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
587 eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
590 ret = eeh_pe_reset_full(pe, true);
592 eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
596 /* Unfreeze the PE */
597 ret = eeh_clear_pe_frozen_state(pe, true);
599 eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
603 /* Restore device state */
604 eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
606 /* Clear recovery mode */
607 eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
613 * eeh_reset_device - Perform actual reset of a pci slot
614 * @driver_eeh_aware: Does the device's driver provide EEH support?
616 * @bus: PCI bus corresponding to the isolcated slot
617 * @rmv_data: Optional, list to record removed devices
619 * This routine must be called to do reset on the indicated PE.
620 * During the reset, udev might be invoked because those affected
621 * PCI devices will be removed and then added.
623 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
624 struct eeh_rmv_data *rmv_data,
625 bool driver_eeh_aware)
629 struct eeh_dev *edev;
630 struct eeh_pe *tmp_pe;
631 bool any_passed = false;
633 eeh_for_each_pe(pe, tmp_pe)
634 any_passed |= eeh_pe_passed(tmp_pe);
636 /* pcibios will clear the counter; save the value */
637 cnt = pe->freeze_count;
641 * We don't remove the corresponding PE instances because
642 * we need the information afterwords. The attached EEH
643 * devices are expected to be attached soon when calling
644 * into pci_hp_add_devices().
646 eeh_pe_state_mark(pe, EEH_PE_KEEP);
647 if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
648 eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
650 pci_lock_rescan_remove();
651 pci_hp_remove_devices(bus);
652 pci_unlock_rescan_remove();
656 * Reset the pci controller. (Asserts RST#; resets config space).
657 * Reconfigure bridges and devices. Don't try to bring the system
658 * up if the reset failed for some reason.
660 * During the reset, it's very dangerous to have uncontrolled PCI
661 * config accesses. So we prefer to block them. However, controlled
662 * PCI config accesses initiated from EEH itself are allowed.
664 rc = eeh_pe_reset_full(pe, false);
668 pci_lock_rescan_remove();
671 eeh_ops->configure_bridge(pe);
672 eeh_pe_restore_bars(pe);
674 /* Clear frozen state */
675 rc = eeh_clear_pe_frozen_state(pe, false);
677 pci_unlock_rescan_remove();
681 /* Give the system 5 seconds to finish running the user-space
682 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
683 * this is a hack, but if we don't do this, and try to bring
684 * the device up before the scripts have taken it down,
685 * potentially weird things happen.
687 if (!driver_eeh_aware || rmv_data->removed_dev_count) {
688 pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
689 (driver_eeh_aware ? "partial" : "complete"));
693 * The EEH device is still connected with its parent
694 * PE. We should disconnect it so the binding can be
695 * rebuilt when adding PCI devices.
697 edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
698 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
699 if (pe->type & EEH_PE_VF) {
700 eeh_add_virt_device(edev);
702 if (!driver_eeh_aware)
703 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
704 pci_hp_add_devices(bus);
707 eeh_pe_state_clear(pe, EEH_PE_KEEP, true);
710 pe->freeze_count = cnt;
712 pci_unlock_rescan_remove();
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300
722 /* Walks the PE tree after processing an event to remove any stale PEs.
724 * NB: This needs to be recursive to ensure the leaf PEs get removed
725 * before their parents do. Although this is possible to do recursively
726 * we don't since this is easier to read and we need to garantee
727 * the leaf nodes will be handled first.
729 static void eeh_pe_cleanup(struct eeh_pe *pe)
731 struct eeh_pe *child_pe, *tmp;
733 list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
734 eeh_pe_cleanup(child_pe);
736 if (pe->state & EEH_PE_KEEP)
739 if (!(pe->state & EEH_PE_INVALID))
742 if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
743 list_del(&pe->child);
749 * eeh_check_slot_presence - Check if a device is still present in a slot
750 * @pdev: pci_dev to check
752 * This function may return a false positive if we can't determine the slot's
753 * presence state. This might happen for PCIe slots if the PE containing
754 * the upstream bridge is also frozen, or the bridge is part of the same PE
757 * This shouldn't happen often, but you might see it if you hotplug a PCIe
760 static bool eeh_slot_presence_check(struct pci_dev *pdev)
762 const struct hotplug_slot_ops *ops;
763 struct pci_slot *slot;
770 if (pdev->error_state == pci_channel_io_perm_failure)
774 if (!slot || !slot->hotplug)
777 ops = slot->hotplug->ops;
778 if (!ops || !ops->get_adapter_status)
781 /* set the attention indicator while we've got the slot ops */
782 if (ops->set_attention_status)
783 ops->set_attention_status(slot->hotplug, 1);
785 rc = ops->get_adapter_status(slot->hotplug, &state);
792 static void eeh_clear_slot_attention(struct pci_dev *pdev)
794 const struct hotplug_slot_ops *ops;
795 struct pci_slot *slot;
800 if (pdev->error_state == pci_channel_io_perm_failure)
804 if (!slot || !slot->hotplug)
807 ops = slot->hotplug->ops;
808 if (!ops || !ops->set_attention_status)
811 ops->set_attention_status(slot->hotplug, 0);
815 * eeh_handle_normal_event - Handle EEH events on a specific PE
816 * @pe: EEH PE - which should not be used after we return, as it may
817 * have been invalidated.
819 * Attempts to recover the given PE. If recovery fails or the PE has failed
820 * too many times, remove the PE.
822 * While PHB detects address or data parity errors on particular PCI
823 * slot, the associated PE will be frozen. Besides, DMA's occurring
824 * to wild addresses (which usually happen due to bugs in device
825 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
826 * #PERR or other misc PCI-related errors also can trigger EEH errors.
828 * Recovery process consists of unplugging the device driver (which
829 * generated hotplug events to userspace), then issuing a PCI #RST to
830 * the device, then reconfiguring the PCI config space for all bridges
831 * & devices under this slot, and then finally restarting the device
832 * drivers (which cause a second set of hotplug events to go out to
835 void eeh_handle_normal_event(struct eeh_pe *pe)
838 struct eeh_dev *edev, *tmp;
839 struct eeh_pe *tmp_pe;
841 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
842 struct eeh_rmv_data rmv_data =
843 {LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
846 bus = eeh_pe_bus_get(pe);
848 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
849 __func__, pe->phb->global_number, pe->addr);
854 * When devices are hot-removed we might get an EEH due to
855 * a driver attempting to touch the MMIO space of a removed
856 * device. In this case we don't have a device to recover
857 * so suppress the event if we can't find any present devices.
859 * The hotplug driver should take care of tearing down the
862 eeh_for_each_pe(pe, tmp_pe)
863 eeh_pe_for_each_dev(tmp_pe, edev, tmp)
864 if (eeh_slot_presence_check(edev->pdev))
868 pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
869 pe->phb->global_number, pe->addr);
870 goto out; /* nothing to recover */
874 if (pe->type & EEH_PE_PHB) {
875 pr_err("EEH: Recovering PHB#%x, location: %s\n",
876 pe->phb->global_number, eeh_pe_loc_get(pe));
878 struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
880 pr_err("EEH: Recovering PHB#%x-PE#%x\n",
881 pe->phb->global_number, pe->addr);
882 pr_err("EEH: PE location: %s, PHB location: %s\n",
883 eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
886 #ifdef CONFIG_STACKTRACE
888 * Print the saved stack trace now that we've verified there's
889 * something to recover.
891 if (pe->trace_entries) {
892 void **ptrs = (void **) pe->stack_trace;
895 pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
896 pe->phb->global_number, pe->addr);
898 /* FIXME: Use the same format as dump_stack() */
899 pr_err("EEH: Call Trace:\n");
900 for (i = 0; i < pe->trace_entries; i++)
901 pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);
903 pe->trace_entries = 0;
905 #endif /* CONFIG_STACKTRACE */
907 eeh_for_each_pe(pe, tmp_pe)
908 eeh_pe_for_each_dev(tmp_pe, edev, tmp)
909 edev->mode &= ~EEH_DEV_NO_HANDLER;
911 eeh_pe_update_time_stamp(pe);
913 if (pe->freeze_count > eeh_max_freezes) {
914 pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
915 pe->phb->global_number, pe->addr,
921 /* Walk the various device drivers attached to this slot through
922 * a reset sequence, giving each an opportunity to do what it needs
923 * to accomplish the reset. Each child gets a report of the
924 * status ... if any child can't handle the reset, then the entire
925 * slot is dlpar removed and added.
927 * When the PHB is fenced, we have to issue a reset to recover from
928 * the error. Override the result if necessary to have partially
929 * hotplug for this case.
931 pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
932 pe->freeze_count, eeh_max_freezes);
933 pr_info("EEH: Notify device drivers to shutdown\n");
934 eeh_set_channel_state(pe, pci_channel_io_frozen);
935 eeh_set_irq_state(pe, false);
936 eeh_pe_report("error_detected(IO frozen)", pe,
937 eeh_report_error, &result);
938 if (result == PCI_ERS_RESULT_DISCONNECT)
942 * Error logged on a PHB are always fences which need a full
943 * PHB reset to clear so force that to happen.
945 if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
946 result = PCI_ERS_RESULT_NEED_RESET;
948 /* Get the current PCI slot state. This can take a long time,
949 * sometimes over 300 seconds for certain systems.
951 rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
952 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
953 pr_warn("EEH: Permanent failure\n");
957 /* Since rtas may enable MMIO when posting the error log,
958 * don't post the error log until after all dev drivers
959 * have been informed.
961 pr_info("EEH: Collect temporary log\n");
962 eeh_slot_error_detail(pe, EEH_LOG_TEMP);
964 /* If all device drivers were EEH-unaware, then shut
965 * down all of the device drivers, and hope they
966 * go down willingly, without panicing the system.
968 if (result == PCI_ERS_RESULT_NONE) {
969 pr_info("EEH: Reset with hotplug activity\n");
970 rc = eeh_reset_device(pe, bus, NULL, false);
972 pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
977 /* If all devices reported they can proceed, then re-enable MMIO */
978 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
979 pr_info("EEH: Enable I/O for affected devices\n");
980 rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
985 result = PCI_ERS_RESULT_NEED_RESET;
987 pr_info("EEH: Notify device drivers to resume I/O\n");
988 eeh_pe_report("mmio_enabled", pe,
989 eeh_report_mmio_enabled, &result);
992 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
993 pr_info("EEH: Enabled DMA for affected devices\n");
994 rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
999 result = PCI_ERS_RESULT_NEED_RESET;
1002 * We didn't do PE reset for the case. The PE
1003 * is still in frozen state. Clear it before
1006 eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
1007 result = PCI_ERS_RESULT_RECOVERED;
1011 /* If any device called out for a reset, then reset the slot */
1012 if (result == PCI_ERS_RESULT_NEED_RESET) {
1013 pr_info("EEH: Reset without hotplug activity\n");
1014 rc = eeh_reset_device(pe, bus, &rmv_data, true);
1016 pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
1017 goto recover_failed;
1020 result = PCI_ERS_RESULT_NONE;
1021 eeh_set_channel_state(pe, pci_channel_io_normal);
1022 eeh_set_irq_state(pe, true);
1023 eeh_pe_report("slot_reset", pe, eeh_report_reset,
1027 if ((result == PCI_ERS_RESULT_RECOVERED) ||
1028 (result == PCI_ERS_RESULT_NONE)) {
1030 * For those hot removed VFs, we should add back them after PF
1031 * get recovered properly.
1033 list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
1035 eeh_add_virt_device(edev);
1036 list_del(&edev->rmv_entry);
1039 /* Tell all device drivers that they can resume operations */
1040 pr_info("EEH: Notify device driver to resume\n");
1041 eeh_set_channel_state(pe, pci_channel_io_normal);
1042 eeh_set_irq_state(pe, true);
1043 eeh_pe_report("resume", pe, eeh_report_resume, NULL);
1044 eeh_for_each_pe(pe, tmp_pe) {
1045 eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
1046 edev->mode &= ~EEH_DEV_NO_HANDLER;
1047 edev->in_error = false;
1051 pr_info("EEH: Recovery successful.\n");
1057 * About 90% of all real-life EEH failures in the field
1058 * are due to poorly seated PCI cards. Only 10% or so are
1059 * due to actual, failed cards.
1061 pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
1062 "Please try reseating or replacing it\n",
1063 pe->phb->global_number, pe->addr);
1065 eeh_slot_error_detail(pe, EEH_LOG_PERM);
1067 /* Notify all devices that they're about to go down. */
1068 eeh_set_irq_state(pe, false);
1069 eeh_pe_report("error_detected(permanent failure)", pe,
1070 eeh_report_failure, NULL);
1071 eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1073 /* Mark the PE to be removed permanently */
1074 eeh_pe_state_mark(pe, EEH_PE_REMOVED);
1077 * Shut down the device drivers for good. We mark
1078 * all removed devices correctly to avoid access
1079 * the their PCI config any more.
1081 if (pe->type & EEH_PE_VF) {
1082 eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
1083 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
1085 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
1086 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
1088 pci_lock_rescan_remove();
1089 pci_hp_remove_devices(bus);
1090 pci_unlock_rescan_remove();
1091 /* The passed PE should no longer be used */
1097 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERYING
1098 * we don't want to modify the PE tree structure so we do it here.
1102 /* clear the slot attention LED for all recovered devices */
1103 eeh_for_each_pe(pe, tmp_pe)
1104 eeh_pe_for_each_dev(tmp_pe, edev, tmp)
1105 eeh_clear_slot_attention(edev->pdev);
1107 eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
1111 * eeh_handle_special_event - Handle EEH events without a specific failing PE
1113 * Called when an EEH event is detected but can't be narrowed down to a
1114 * specific PE. Iterates through possible failures and handles them as
1117 void eeh_handle_special_event(void)
1119 struct eeh_pe *pe, *phb_pe, *tmp_pe;
1120 struct eeh_dev *edev, *tmp_edev;
1121 struct pci_bus *bus;
1122 struct pci_controller *hose;
1123 unsigned long flags;
1128 rc = eeh_ops->next_error(&pe);
1131 case EEH_NEXT_ERR_DEAD_IOC:
1132 /* Mark all PHBs in dead state */
1133 eeh_serialize_lock(&flags);
1135 /* Purge all events */
1136 eeh_remove_event(NULL, true);
1138 list_for_each_entry(hose, &hose_list, list_node) {
1139 phb_pe = eeh_phb_pe_get(hose);
1140 if (!phb_pe) continue;
1142 eeh_pe_mark_isolated(phb_pe);
1145 eeh_serialize_unlock(flags);
1148 case EEH_NEXT_ERR_FROZEN_PE:
1149 case EEH_NEXT_ERR_FENCED_PHB:
1150 case EEH_NEXT_ERR_DEAD_PHB:
1151 /* Mark the PE in fenced state */
1152 eeh_serialize_lock(&flags);
1154 /* Purge all events of the PHB */
1155 eeh_remove_event(pe, true);
1157 if (rc != EEH_NEXT_ERR_DEAD_PHB)
1158 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1159 eeh_pe_mark_isolated(pe);
1161 eeh_serialize_unlock(flags);
1164 case EEH_NEXT_ERR_NONE:
1167 pr_warn("%s: Invalid value %d from next_error()\n",
1173 * For fenced PHB and frozen PE, it's handled as normal
1174 * event. We have to remove the affected PHBs for dead
1177 if (rc == EEH_NEXT_ERR_FROZEN_PE ||
1178 rc == EEH_NEXT_ERR_FENCED_PHB) {
1179 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
1180 eeh_handle_normal_event(pe);
1182 eeh_for_each_pe(pe, tmp_pe)
1183 eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
1184 edev->mode &= ~EEH_DEV_NO_HANDLER;
1186 /* Notify all devices to be down */
1187 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
1189 "error_detected(permanent failure)", pe,
1190 eeh_report_failure, NULL);
1191 eeh_set_channel_state(pe, pci_channel_io_perm_failure);
1193 pci_lock_rescan_remove();
1194 list_for_each_entry(hose, &hose_list, list_node) {
1195 phb_pe = eeh_phb_pe_get(hose);
1197 !(phb_pe->state & EEH_PE_ISOLATED) ||
1198 (phb_pe->state & EEH_PE_RECOVERING))
1201 bus = eeh_pe_bus_get(phb_pe);
1203 pr_err("%s: Cannot find PCI bus for "
1206 pe->phb->global_number,
1210 pci_hp_remove_devices(bus);
1212 pci_unlock_rescan_remove();
1216 * If we have detected dead IOC, we needn't proceed
1217 * any more since all PHBs would have been removed
1219 if (rc == EEH_NEXT_ERR_DEAD_IOC)
1221 } while (rc != EEH_NEXT_ERR_NONE);