 * This file implements the platform-dependent EEH operations for the
 * powernv platform.
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/list.h>
21 #include <linux/msi.h>
23 #include <linux/pci.h>
24 #include <linux/proc_fs.h>
25 #include <linux/rbtree.h>
26 #include <linux/sched.h>
27 #include <linux/seq_file.h>
28 #include <linux/spinlock.h>
31 #include <asm/eeh_event.h>
32 #include <asm/firmware.h>
34 #include <asm/iommu.h>
35 #include <asm/machdep.h>
36 #include <asm/msi_bitmap.h>
38 #include <asm/ppc-pci.h>
39 #include <asm/pnv-pci.h>
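/*
 * pnv_eeh_nb_init records whether the OPAL event notifier has been
 * registered, and eeh_event_irq caches the Linux interrupt mapped to
 * OPAL_EVENT_PCI_ERROR (both are set up in pnv_eeh_post_init()).
 */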
44 static bool pnv_eeh_nb_init = false;
45 static int eeh_event_irq = -EINVAL;
47 static int pnv_eeh_init(void)
49 struct pci_controller *hose;
51 int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;
53 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
54 pr_warn("%s: OPAL is required !\n",
60 eeh_add_flag(EEH_PROBE_MODE_DEV);
 * P7IOC blocks PCI config access to a frozen PE, but PHB3
 * doesn't. So we have to selectively enable I/O prior to
 * collecting the error log.
67 list_for_each_entry(hose, &hose_list, list_node) {
68 phb = hose->private_data;
70 if (phb->model == PNV_PHB_MODEL_P7IOC)
71 eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
73 if (phb->diag_data_size > max_diag_size)
74 max_diag_size = phb->diag_data_size;
 * PE#0 should be regarded as valid by the EEH core
 * if it's not the reserved one. Currently, the reserved
 * PE is PE#255 on PHB3 and PE#127 on P7IOC, so PE#0
 * should be regarded as valid on both PHB3 and P7IOC.
83 if (phb->ioda.reserved_pe_idx != 0)
84 eeh_add_flag(EEH_VALID_PE_ZERO);
89 eeh_set_pe_aux_size(max_diag_size);
94 static irqreturn_t pnv_eeh_event(int irq, void *data)
97 * We simply send a special EEH event if EEH has been
98 * enabled. We don't care about EEH events until we've
99 * finished processing the outstanding ones. Event processing
100 * gets unmasked in next_error() if EEH is enabled.
102 disable_irq_nosync(irq);
105 eeh_send_failure_event(NULL);
110 #ifdef CONFIG_DEBUG_FS
111 static ssize_t pnv_eeh_ei_write(struct file *filp,
112 const char __user *user_buf,
113 size_t count, loff_t *ppos)
115 struct pci_controller *hose = filp->private_data;
117 int pe_no, type, func;
118 unsigned long addr, mask;
122 if (!eeh_ops || !eeh_ops->err_inject)
125 /* Copy over argument buffer */
126 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
130 /* Retrieve parameters */
131 ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
132 &pe_no, &type, &func, &addr, &mask);
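/*
 * Writes are parsed as five colon-separated hex fields,
 * "pe_no:type:func:addr:mask", which are passed straight through
 * to eeh_ops->err_inject() below.
 */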
137 pe = eeh_pe_get(hose, pe_no, 0);
141 /* Do error injection */
142 ret = eeh_ops->err_inject(pe, type, func, addr, mask);
143 return ret < 0 ? ret : count;
146 static const struct file_operations pnv_eeh_ei_fops = {
149 .write = pnv_eeh_ei_write,
152 static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
154 struct pci_controller *hose = data;
155 struct pnv_phb *phb = hose->private_data;
157 out_be64(phb->regs + offset, val);
161 static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
163 struct pci_controller *hose = data;
164 struct pnv_phb *phb = hose->private_data;
166 *val = in_be64(phb->regs + offset);
170 #define PNV_EEH_DBGFS_ENTRY(name, reg) \
171 static int pnv_eeh_dbgfs_set_##name(void *data, u64 val) \
173 return pnv_eeh_dbgfs_set(data, reg, val); \
176 static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val) \
178 return pnv_eeh_dbgfs_get(data, reg, val); \
181 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name, \
182 pnv_eeh_dbgfs_get_##name, \
183 pnv_eeh_dbgfs_set_##name, \
186 PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
187 PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
188 PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
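/*
 * Each PNV_EEH_DBGFS_ENTRY() invocation above generates a get/set pair
 * plus a pnv_eeh_dbgfs_ops_<name> attribute that reads or writes the
 * given PHB register offset; these back the err_injct_* debugfs files
 * created in pnv_eeh_post_init().
 */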
190 #endif /* CONFIG_DEBUG_FS */
193 * pnv_eeh_post_init - EEH platform dependent post initialization
195 * EEH platform dependent post initialization on powernv. When
196 * the function is called, the EEH PEs and devices should have
 * been built. Once the I/O cache has been built, EEH is
 * ready to provide service.
200 static int pnv_eeh_post_init(void)
202 struct pci_controller *hose;
206 /* Register OPAL event notifier */
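/*
 * opal_event_request() maps the OPAL_EVENT_PCI_ERROR event to a
 * Linux interrupt, which is then wired up to pnv_eeh_event().
 */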
207 if (!pnv_eeh_nb_init) {
208 eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
209 if (eeh_event_irq < 0) {
210 pr_err("%s: Can't register OPAL event interrupt (%d)\n",
211 __func__, eeh_event_irq);
212 return eeh_event_irq;
215 ret = request_irq(eeh_event_irq, pnv_eeh_event,
216 IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
218 irq_dispose_mapping(eeh_event_irq);
219 pr_err("%s: Can't request OPAL event interrupt (%d)\n",
220 __func__, eeh_event_irq);
224 pnv_eeh_nb_init = true;
228 disable_irq(eeh_event_irq);
230 list_for_each_entry(hose, &hose_list, list_node) {
231 phb = hose->private_data;
 * If EEH is enabled, we're going to rely on that.
 * Otherwise, we fall back to the conventional mechanism
 * of clearing the frozen PE during PCI config access.
239 phb->flags |= PNV_PHB_FLAG_EEH;
241 phb->flags &= ~PNV_PHB_FLAG_EEH;
243 /* Create debugfs entries */
244 #ifdef CONFIG_DEBUG_FS
245 if (phb->has_dbgfs || !phb->dbgfs)
249 debugfs_create_file("err_injct", 0200,
253 debugfs_create_file("err_injct_outbound", 0600,
255 &pnv_eeh_dbgfs_ops_outb);
256 debugfs_create_file("err_injct_inboundA", 0600,
258 &pnv_eeh_dbgfs_ops_inbA);
259 debugfs_create_file("err_injct_inboundB", 0600,
261 &pnv_eeh_dbgfs_ops_inbB);
262 #endif /* CONFIG_DEBUG_FS */
268 static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
270 int pos = PCI_CAPABILITY_LIST;
271 int cnt = 48; /* Maximal number of capabilities */
277 /* Check if the device supports capabilities */
278 pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
279 if (!(status & PCI_STATUS_CAP_LIST))
283 pnv_pci_cfg_read(pdn, pos, 1, &pos);
288 pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
297 pos += PCI_CAP_LIST_NEXT;
303 static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
305 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
307 int pos = 256, ttl = (4096 - 256) / 8;
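/*
 * Extended capabilities live in config space [0x100, 0x1000) and each
 * capability header is at least 8 bytes, so ttl bounds the walk and
 * guards against malformed capability chains.
 */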
309 if (!edev || !edev->pcie_cap)
311 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
317 if (PCI_EXT_CAP_ID(header) == cap && pos)
320 pos = PCI_EXT_CAP_NEXT(header);
324 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
332 * pnv_eeh_probe - Do probe on PCI device
333 * @pdn: PCI device node
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see whether they support EEH. The function
 * is introduced for that purpose. By default, EEH is enabled on all PCI
 * devices, so we only need to do the necessary initialization on the
 * corresponding eeh device and create the PE accordingly.
 *
 * It's notable that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device. During a PCI device hotplug, which was
 * possibly triggered by the EEH core, the binding between the EEH device
 * and the PCI device isn't built yet.
348 static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
350 struct pci_controller *hose = pdn->phb;
351 struct pnv_phb *phb = hose->private_data;
352 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
355 int config_addr = (pdn->busno << 8) | (pdn->devfn);
 * When probing the root bridge, there are no subordinate
 * PCI devices and no OF node for it, so it's not reasonable
 * to continue the probing.
363 if (!edev || edev->pe)
366 /* Skip for PCI-ISA bridge */
367 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
370 /* Initialize eeh device */
371 edev->class_code = pdn->class_code;
372 edev->mode &= 0xFFFFFF00;
373 edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
374 edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
375 edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
376 edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
377 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
378 edev->mode |= EEH_DEV_BRIDGE;
379 if (edev->pcie_cap) {
380 pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
382 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
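/* The device/port type lives in bits 7:4 of the PCIe flags register */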
383 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
384 edev->mode |= EEH_DEV_ROOT_PORT;
385 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
386 edev->mode |= EEH_DEV_DS_PORT;
390 edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];
393 ret = eeh_add_to_parent_pe(edev);
395 pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
396 __func__, hose->global_number, pdn->busno,
397 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
 * If the PE contains any one of the following adapters, the
 * PCI config space can't be accessed when dumping the EEH log.
 * Otherwise, we will run into a fenced PHB caused by a shortage
 * of outbound credits in the adapter. The PCI config access
 * should be blocked until the PE is reset. MMIO access is
 * dropped by the hardware anyway. In order to drop PCI config
 * requests, one more flag (EEH_PE_CFG_RESTRICTED) is introduced,
 * which will be checked in the backend for PE state retrieval.
 * If the PE becomes frozen for the first time and the flag has
 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
 * that PE to block its config space.
414 * Broadcom BCM5718 2-ports NICs (14e4:1656)
415 * Broadcom Austin 4-ports NICs (14e4:1657)
416 * Broadcom Shiner 4-ports 1G NICs (14e4:168a)
417 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
419 if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
420 pdn->device_id == 0x1656) ||
421 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
422 pdn->device_id == 0x1657) ||
423 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
424 pdn->device_id == 0x168a) ||
425 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
426 pdn->device_id == 0x168e))
427 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
 * Cache the PE primary bus, which can't be fetched when
 * a full hotplug is in progress. In that case, all child
 * PCI devices of the PE are expected to have been removed
 * prior to the PE reset.
435 if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
436 edev->pe->bus = pci_find_bus(hose->global_number,
439 edev->pe->state |= EEH_PE_PRI_BUS;
443 * Enable EEH explicitly so that we will do EEH check
444 * while accessing I/O stuff
446 eeh_add_flag(EEH_ENABLED);
448 /* Save memory bars */
455 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
457 * @option: operation to be issued
459 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
463 static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
465 struct pci_controller *hose = pe->phb;
466 struct pnv_phb *phb = hose->private_data;
467 bool freeze_pe = false;
472 case EEH_OPT_DISABLE:
476 case EEH_OPT_THAW_MMIO:
477 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
479 case EEH_OPT_THAW_DMA:
480 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
482 case EEH_OPT_FREEZE_PE:
484 opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
487 pr_warn("%s: Invalid option %d\n", __func__, option);
491 /* Freeze master and slave PEs if PHB supports compound PEs */
493 if (phb->freeze_pe) {
494 phb->freeze_pe(phb, pe->addr);
498 rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
499 if (rc != OPAL_SUCCESS) {
500 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
501 __func__, rc, phb->hose->global_number,
509 /* Unfreeze master and slave PEs if PHB supports */
510 if (phb->unfreeze_pe)
511 return phb->unfreeze_pe(phb, pe->addr, opt);
513 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
514 if (rc != OPAL_SUCCESS) {
515 pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
516 __func__, rc, option, phb->hose->global_number,
525 * pnv_eeh_get_pe_addr - Retrieve PE address
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
531 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
536 static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
538 struct pnv_phb *phb = pe->phb->private_data;
541 rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
542 phb->diag_data_size);
543 if (rc != OPAL_SUCCESS)
544 pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
545 __func__, rc, pe->phb->global_number);
548 static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
550 struct pnv_phb *phb = pe->phb->private_data;
556 rc = opal_pci_eeh_freeze_status(phb->opal_id,
561 if (rc != OPAL_SUCCESS) {
562 pr_warn("%s: Failure %lld getting PHB#%x state\n",
563 __func__, rc, phb->hose->global_number);
564 return EEH_STATE_NOT_SUPPORT;
 * Check the PHB state. If the PHB is frozen for the
 * first time, dump the PHB diag-data.
571 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
572 result = (EEH_STATE_MMIO_ACTIVE |
573 EEH_STATE_DMA_ACTIVE |
574 EEH_STATE_MMIO_ENABLED |
575 EEH_STATE_DMA_ENABLED);
576 } else if (!(pe->state & EEH_PE_ISOLATED)) {
577 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
578 pnv_eeh_get_phb_diag(pe);
580 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
581 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
587 static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
589 struct pnv_phb *phb = pe->phb->private_data;
596 * We don't clobber hardware frozen state until PE
597 * reset is completed. In order to keep EEH core
598 * moving forward, we have to return operational
599 * state during PE reset.
601 if (pe->state & EEH_PE_RESET) {
602 result = (EEH_STATE_MMIO_ACTIVE |
603 EEH_STATE_DMA_ACTIVE |
604 EEH_STATE_MMIO_ENABLED |
605 EEH_STATE_DMA_ENABLED);
610 * Fetch PE state from hardware. If the PHB
611 * supports compound PE, let it handle that.
613 if (phb->get_pe_state) {
614 fstate = phb->get_pe_state(phb, pe->addr);
616 rc = opal_pci_eeh_freeze_status(phb->opal_id,
621 if (rc != OPAL_SUCCESS) {
622 pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
623 __func__, rc, phb->hose->global_number,
625 return EEH_STATE_NOT_SUPPORT;
629 /* Figure out state */
631 case OPAL_EEH_STOPPED_NOT_FROZEN:
632 result = (EEH_STATE_MMIO_ACTIVE |
633 EEH_STATE_DMA_ACTIVE |
634 EEH_STATE_MMIO_ENABLED |
635 EEH_STATE_DMA_ENABLED);
637 case OPAL_EEH_STOPPED_MMIO_FREEZE:
638 result = (EEH_STATE_DMA_ACTIVE |
639 EEH_STATE_DMA_ENABLED);
641 case OPAL_EEH_STOPPED_DMA_FREEZE:
642 result = (EEH_STATE_MMIO_ACTIVE |
643 EEH_STATE_MMIO_ENABLED);
645 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
648 case OPAL_EEH_STOPPED_RESET:
649 result = EEH_STATE_RESET_ACTIVE;
651 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
652 result = EEH_STATE_UNAVAILABLE;
654 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
655 result = EEH_STATE_NOT_SUPPORT;
658 result = EEH_STATE_NOT_SUPPORT;
659 pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
660 __func__, phb->hose->global_number,
 * If the PHB supports compound PEs, freeze all
 * slave PEs for consistency.
 *
 * If the PE is switching to the frozen state for the
 * first time, dump the PHB diag-data.
671 if (!(result & EEH_STATE_NOT_SUPPORT) &&
672 !(result & EEH_STATE_UNAVAILABLE) &&
673 !(result & EEH_STATE_MMIO_ACTIVE) &&
674 !(result & EEH_STATE_DMA_ACTIVE) &&
675 !(pe->state & EEH_PE_ISOLATED)) {
677 phb->freeze_pe(phb, pe->addr);
679 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
680 pnv_eeh_get_phb_diag(pe);
682 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
683 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
690 * pnv_eeh_get_state - Retrieve PE state
692 * @delay: delay while PE state is temporarily unavailable
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform it should be retrieved from the IODA table, so we
 * prefer passing this down to the hardware implementation to
 * handle it.
699 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
703 if (pe->type & EEH_PE_PHB)
704 ret = pnv_eeh_get_phb_state(pe);
706 ret = pnv_eeh_get_pe_state(pe);
 * If the PE state is temporarily unavailable, tell the
 * EEH core to delay for the default period.
717 if (ret & EEH_STATE_UNAVAILABLE)
723 static s64 pnv_eeh_poll(unsigned long id)
725 s64 rc = OPAL_HARDWARE;
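/*
 * A positive return from opal_pci_poll() is treated as the number of
 * milliseconds to wait before polling again; zero or a negative value
 * means the request has completed or failed.
 */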
728 rc = opal_pci_poll(id);
732 if (system_state < SYSTEM_RUNNING)
741 int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
743 struct pnv_phb *phb = hose->private_data;
744 s64 rc = OPAL_HARDWARE;
746 pr_debug("%s: Reset PHB#%x, option=%d\n",
747 __func__, hose->global_number, option);
749 /* Issue PHB complete reset request */
750 if (option == EEH_RESET_FUNDAMENTAL ||
751 option == EEH_RESET_HOT)
752 rc = opal_pci_reset(phb->opal_id,
753 OPAL_RESET_PHB_COMPLETE,
755 else if (option == EEH_RESET_DEACTIVATE)
756 rc = opal_pci_reset(phb->opal_id,
757 OPAL_RESET_PHB_COMPLETE,
758 OPAL_DEASSERT_RESET);
 * Poll the state of the PHB until the request completes
 * successfully. The PHB reset is usually a PHB complete
 * reset followed by a hot reset on the root bus, so we also
 * need the PCI bus settlement delay.
769 rc = pnv_eeh_poll(phb->opal_id);
770 if (option == EEH_RESET_DEACTIVATE) {
771 if (system_state < SYSTEM_RUNNING)
772 udelay(1000 * EEH_PE_RST_SETTLE_TIME);
774 msleep(EEH_PE_RST_SETTLE_TIME);
777 if (rc != OPAL_SUCCESS)
783 static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
785 struct pnv_phb *phb = hose->private_data;
786 s64 rc = OPAL_HARDWARE;
788 pr_debug("%s: Reset PHB#%x, option=%d\n",
789 __func__, hose->global_number, option);
 * During the reset deassert time, we needn't care about the
 * reset scope because the firmware does nothing for
 * fundamental or hot resets during the deassert phase.
796 if (option == EEH_RESET_FUNDAMENTAL)
797 rc = opal_pci_reset(phb->opal_id,
798 OPAL_RESET_PCI_FUNDAMENTAL,
800 else if (option == EEH_RESET_HOT)
801 rc = opal_pci_reset(phb->opal_id,
804 else if (option == EEH_RESET_DEACTIVATE)
805 rc = opal_pci_reset(phb->opal_id,
807 OPAL_DEASSERT_RESET);
811 /* Poll state of the PHB until the request is done */
813 rc = pnv_eeh_poll(phb->opal_id);
814 if (option == EEH_RESET_DEACTIVATE)
815 msleep(EEH_PE_RST_SETTLE_TIME);
817 if (rc != OPAL_SUCCESS)
823 static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
825 struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
826 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
827 int aer = edev ? edev->aer_cap : 0;
830 pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
831 __func__, pci_domain_nr(dev->bus),
832 dev->bus->number, option);
835 case EEH_RESET_FUNDAMENTAL:
837 /* Don't report linkDown event */
839 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
841 ctrl |= PCI_ERR_UNC_SURPDN;
842 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
846 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
847 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
848 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
850 msleep(EEH_PE_RST_HOLD_TIME);
852 case EEH_RESET_DEACTIVATE:
853 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
854 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
855 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
857 msleep(EEH_PE_RST_SETTLE_TIME);
859 /* Continue reporting linkDown event */
861 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
863 ctrl &= ~PCI_ERR_UNC_SURPDN;
864 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
874 static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
876 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
877 struct pnv_phb *phb = hose->private_data;
878 struct device_node *dn = pci_device_to_OF_node(pdev);
879 uint64_t id = PCI_SLOT_ID(phb->opal_id,
880 (pdev->bus->number << 8) | pdev->devfn);
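/*
 * PCI_SLOT_ID() combines the PHB's OPAL id with the bridge's RID
 * (bus << 8 | devfn) into the slot identifier expected by
 * opal_pci_reset() for a firmware-driven reset.
 */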
/* Hot reset to the bus if the firmware cannot handle it */
885 if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
886 return __pnv_eeh_bridge_reset(pdev, option);
889 case EEH_RESET_FUNDAMENTAL:
890 scope = OPAL_RESET_PCI_FUNDAMENTAL;
893 scope = OPAL_RESET_PCI_HOT;
895 case EEH_RESET_DEACTIVATE:
898 dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
903 rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
904 if (rc <= OPAL_SUCCESS)
907 rc = pnv_eeh_poll(id);
909 return (rc == OPAL_SUCCESS) ? 0 : -EIO;
912 void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
914 struct pci_controller *hose;
916 if (pci_is_root_bus(dev->bus)) {
917 hose = pci_bus_to_host(dev->bus);
918 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
919 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
921 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
922 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
926 static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
931 /* Wait for Transaction Pending bit to be cleared */
932 for (i = 0; i < 4; i++) {
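/* Poll up to four times, backing off 100, 200, 400, then 800 ms */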
933 eeh_ops->read_config(pdn, pos, 2, &status);
934 if (!(status & mask))
937 msleep((1 << i) * 100);
940 pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
942 pdn->phb->global_number, pdn->busno,
943 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
946 static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
948 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
951 if (WARN_ON(!edev->pcie_cap))
eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
955 if (!(reg & PCI_EXP_DEVCAP_FLR))
960 case EEH_RESET_FUNDAMENTAL:
961 pnv_eeh_wait_for_pending(pdn, "",
962 edev->pcie_cap + PCI_EXP_DEVSTA,
963 PCI_EXP_DEVSTA_TRPND);
964 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
966 reg |= PCI_EXP_DEVCTL_BCR_FLR;
967 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
969 msleep(EEH_PE_RST_HOLD_TIME);
971 case EEH_RESET_DEACTIVATE:
972 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
974 reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
975 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
977 msleep(EEH_PE_RST_SETTLE_TIME);
984 static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
986 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
989 if (WARN_ON(!edev->af_cap))
992 eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
993 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
998 case EEH_RESET_FUNDAMENTAL:
 * Wait for the Transaction Pending bit to clear. A word-aligned
 * test is used, so we use the control offset rather than status
 * and shift the test bit to match.
1004 pnv_eeh_wait_for_pending(pdn, "AF",
1005 edev->af_cap + PCI_AF_CTRL,
1006 PCI_AF_STATUS_TP << 8);
1007 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
1008 1, PCI_AF_CTRL_FLR);
1009 msleep(EEH_PE_RST_HOLD_TIME);
1011 case EEH_RESET_DEACTIVATE:
1012 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
1013 msleep(EEH_PE_RST_SETTLE_TIME);
1020 static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
1022 struct eeh_dev *edev;
1026 /* The VF PE should have only one child device */
1027 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
1028 pdn = eeh_dev_to_pdn(edev);
1032 ret = pnv_eeh_do_flr(pdn, option);
1036 return pnv_eeh_do_af_flr(pdn, option);
1040 * pnv_eeh_reset - Reset the specified PE
1042 * @option: reset option
 * Do a reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets would be SOFT (HOT) resets.
1051 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
1053 struct pci_controller *hose = pe->phb;
1054 struct pnv_phb *phb;
1055 struct pci_bus *bus;
 * For a PHB reset, we always do a complete reset. For PEs whose
 * primary bus is derived from the root complex (root bus) or root
 * port (usually bus#1), we apply a hot or fundamental reset on the
 * root port. For other PEs, we always do a hot reset on the PE
 * primary bus.
 *
 * Here we have a different design from pHyp, which always clears the
 * frozen state during PE reset. However, the good idea here from
 * benh is to keep the frozen state until the PE reset is completely
 * done (until BAR restore). With the frozen state, HW drops illegal
 * I/O or MMIO accesses, which could otherwise cause recursive PE
 * freezes during the reset. The side effect is that the EEH core has
 * to clear the frozen state explicitly after BAR restore.
1072 if (pe->type & EEH_PE_PHB)
1073 return pnv_eeh_phb_reset(hose, option);
1076 * The frozen PE might be caused by PAPR error injection
1077 * registers, which are expected to be cleared after hitting
1078 * frozen PE as stated in the hardware spec. Unfortunately,
1079 * that's not true on P7IOC. So we have to clear it manually
1080 * to avoid recursive EEH errors during recovery.
1082 phb = hose->private_data;
1083 if (phb->model == PNV_PHB_MODEL_P7IOC &&
1084 (option == EEH_RESET_HOT ||
1085 option == EEH_RESET_FUNDAMENTAL)) {
1086 rc = opal_pci_reset(phb->opal_id,
1087 OPAL_RESET_PHB_ERROR,
1089 if (rc != OPAL_SUCCESS) {
1090 pr_warn("%s: Failure %lld clearing error injection registers\n",
1096 if (pe->type & EEH_PE_VF)
1097 return pnv_eeh_reset_vf_pe(pe, option);
1099 bus = eeh_pe_bus_get(pe);
1101 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
1102 __func__, pe->phb->global_number, pe->addr);
1107 * If dealing with the root bus (or the bus underneath the
1108 * root port), we reset the bus underneath the root port.
 * The cxl driver depends on this behaviour for bi-modal card
 * switching.
1113 if (pci_is_root_bus(bus) ||
1114 pci_is_root_bus(bus->parent))
1115 return pnv_eeh_root_reset(hose, option);
1117 return pnv_eeh_bridge_reset(bus->self, option);
1121 * pnv_eeh_wait_state - Wait for PE state
1123 * @max_wait: maximal period in millisecond
 * Wait for the state of the associated PE. It might take some time
1126 * to retrieve the PE's state.
1128 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
1134 ret = pnv_eeh_get_state(pe, &mwait);
1137 * If the PE's state is temporarily unavailable,
1138 * we have to wait for the specified time. Otherwise,
1139 * the PE's state will be returned immediately.
1141 if (ret != EEH_STATE_UNAVAILABLE)
1144 if (max_wait <= 0) {
1145 pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
1146 __func__, pe->addr, max_wait);
1147 return EEH_STATE_NOT_SUPPORT;
1154 return EEH_STATE_NOT_SUPPORT;
1158 * pnv_eeh_get_log - Retrieve error log
1160 * @severity: temporary or permanent error log
1161 * @drv_log: driver log to be combined with retrieved error log
1162 * @len: length of driver log
1164 * Retrieve the temporary or permanent error from the PE.
1166 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
1167 char *drv_log, unsigned long len)
1169 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
1170 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
1176 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
1179 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
1183 static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
1189 * pnv_pe_err_inject - Inject specified error to the indicated PE
1190 * @pe: the indicated PE
1192 * @func: specific error type
1194 * @mask: address mask
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, into the indicated PE for
 * testing purposes.
1200 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
1201 unsigned long addr, unsigned long mask)
1203 struct pci_controller *hose = pe->phb;
1204 struct pnv_phb *phb = hose->private_data;
1207 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1208 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1209 pr_warn("%s: Invalid error type %d\n",
1214 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
1215 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1216 pr_warn("%s: Invalid error function %d\n",
1221 /* Firmware supports error injection ? */
1222 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1223 pr_warn("%s: Firmware doesn't support error injection\n",
1228 /* Do error injection */
1229 rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1230 type, func, addr, mask);
1231 if (rc != OPAL_SUCCESS) {
1232 pr_warn("%s: Failure %lld injecting error "
1233 "%d-%d to PHB#%x-PE#%x\n",
1234 __func__, rc, type, func,
1235 hose->global_number, pe->addr);
1242 static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
1244 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1246 if (!edev || !edev->pe)
 * We will issue an FLR or AF FLR to all VFs, which are contained
 * in the VF PE. This relies on the EEH PCI config accessors, so we
 * can't block them during that window.
1254 if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
1257 if (edev->pe->state & EEH_PE_CFG_BLOCKED)
1263 static int pnv_eeh_read_config(struct pci_dn *pdn,
1264 int where, int size, u32 *val)
1267 return PCIBIOS_DEVICE_NOT_FOUND;
1269 if (pnv_eeh_cfg_blocked(pdn)) {
1271 return PCIBIOS_SET_FAILED;
1274 return pnv_pci_cfg_read(pdn, where, size, val);
1277 static int pnv_eeh_write_config(struct pci_dn *pdn,
1278 int where, int size, u32 val)
1281 return PCIBIOS_DEVICE_NOT_FOUND;
1283 if (pnv_eeh_cfg_blocked(pdn))
1284 return PCIBIOS_SET_FAILED;
1286 return pnv_pci_cfg_write(pdn, where, size, val);
1289 static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
1292 if (data->gemXfir || data->gemRfir ||
1293 data->gemRirqfir || data->gemMask || data->gemRwof)
1294 pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
1295 be64_to_cpu(data->gemXfir),
1296 be64_to_cpu(data->gemRfir),
1297 be64_to_cpu(data->gemRirqfir),
1298 be64_to_cpu(data->gemMask),
1299 be64_to_cpu(data->gemRwof));
1302 if (data->lemFir || data->lemErrMask ||
1303 data->lemAction0 || data->lemAction1 || data->lemWof)
1304 pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
1305 be64_to_cpu(data->lemFir),
1306 be64_to_cpu(data->lemErrMask),
1307 be64_to_cpu(data->lemAction0),
1308 be64_to_cpu(data->lemAction1),
1309 be64_to_cpu(data->lemWof));
1312 static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
1314 struct pnv_phb *phb = hose->private_data;
1315 struct OpalIoP7IOCErrorData *data =
1316 (struct OpalIoP7IOCErrorData*)phb->diag_data;
1319 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
1320 if (rc != OPAL_SUCCESS) {
1321 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
1322 __func__, phb->hub_id, rc);
1326 switch (be16_to_cpu(data->type)) {
1327 case OPAL_P7IOC_DIAG_TYPE_RGC:
1328 pr_info("P7IOC diag-data for RGC\n\n");
1329 pnv_eeh_dump_hub_diag_common(data);
1330 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
1331 pr_info(" RGC: %016llx %016llx\n",
1332 be64_to_cpu(data->rgc.rgcStatus),
1333 be64_to_cpu(data->rgc.rgcLdcp));
1335 case OPAL_P7IOC_DIAG_TYPE_BI:
1336 pr_info("P7IOC diag-data for BI %s\n\n",
1337 data->bi.biDownbound ? "Downbound" : "Upbound");
1338 pnv_eeh_dump_hub_diag_common(data);
1339 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
1340 data->bi.biLdcp2 || data->bi.biFenceStatus)
1341 pr_info(" BI: %016llx %016llx %016llx %016llx\n",
1342 be64_to_cpu(data->bi.biLdcp0),
1343 be64_to_cpu(data->bi.biLdcp1),
1344 be64_to_cpu(data->bi.biLdcp2),
1345 be64_to_cpu(data->bi.biFenceStatus));
1347 case OPAL_P7IOC_DIAG_TYPE_CI:
1348 pr_info("P7IOC diag-data for CI Port %d\n\n",
1350 pnv_eeh_dump_hub_diag_common(data);
1351 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
1352 pr_info(" CI: %016llx %016llx\n",
1353 be64_to_cpu(data->ci.ciPortStatus),
1354 be64_to_cpu(data->ci.ciPortLdcp));
1356 case OPAL_P7IOC_DIAG_TYPE_MISC:
1357 pr_info("P7IOC diag-data for MISC\n\n");
1358 pnv_eeh_dump_hub_diag_common(data);
1360 case OPAL_P7IOC_DIAG_TYPE_I2C:
1361 pr_info("P7IOC diag-data for I2C\n\n");
1362 pnv_eeh_dump_hub_diag_common(data);
1365 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
1366 __func__, phb->hub_id, data->type);
1370 static int pnv_eeh_get_pe(struct pci_controller *hose,
1371 u16 pe_no, struct eeh_pe **pe)
1373 struct pnv_phb *phb = hose->private_data;
1374 struct pnv_ioda_pe *pnv_pe;
1375 struct eeh_pe *dev_pe;
 * If the PHB supports compound PEs, fetch the master PE,
 * because slave PEs are invisible to the EEH core.
1382 pnv_pe = &phb->ioda.pe_array[pe_no];
1383 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
1384 pnv_pe = pnv_pe->master;
1386 !(pnv_pe->flags & PNV_IODA_PE_MASTER));
1387 pe_no = pnv_pe->pe_number;
1390 /* Find the PE according to PE# */
1391 dev_pe = eeh_pe_get(hose, pe_no, 0);
1395 /* Freeze the (compound) PE */
1397 if (!(dev_pe->state & EEH_PE_ISOLATED))
1398 phb->freeze_pe(phb, pe_no);
1401 * At this point, we're sure the (compound) PE should
 * have been frozen. However, we still need to poke upwards
 * until we hit the topmost frozen PE.
1405 dev_pe = dev_pe->parent;
1406 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
1408 int active_flags = (EEH_STATE_MMIO_ACTIVE |
1409 EEH_STATE_DMA_ACTIVE);
1411 ret = eeh_ops->get_state(dev_pe, NULL);
1412 if (ret <= 0 || (ret & active_flags) == active_flags) {
1413 dev_pe = dev_pe->parent;
1417 /* Frozen parent PE */
1419 if (!(dev_pe->state & EEH_PE_ISOLATED))
1420 phb->freeze_pe(phb, dev_pe->addr);
1423 dev_pe = dev_pe->parent;
1430 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * The function is expected to be called by the EEH core when it gets a
 * special EEH event (one without a bound PE). The function calls the
 * OPAL APIs for the next error to handle. Informational errors are
 * handled internally by the platform. However, dead IOC, dead PHB,
 * fenced PHB and frozen PE errors should eventually be handled by the
 * EEH core.
1439 static int pnv_eeh_next_error(struct eeh_pe **pe)
1441 struct pci_controller *hose;
1442 struct pnv_phb *phb;
1443 struct eeh_pe *phb_pe, *parent_pe;
1444 __be64 frozen_pe_no;
1445 __be16 err_type, severity;
1446 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
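/* A PE is considered fully functional only when both MMIO and DMA are active */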
1448 int state, ret = EEH_NEXT_ERR_NONE;
1451 * While running here, it's safe to purge the event queue. The
1452 * event should still be masked.
1454 eeh_remove_event(NULL, false);
1456 list_for_each_entry(hose, &hose_list, list_node) {
 * If the subordinate PCI buses of the PHB have been
 * removed or are already under error recovery, we
 * needn't take care of it any more.
1462 phb = hose->private_data;
1463 phb_pe = eeh_phb_pe_get(hose);
1464 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
1467 rc = opal_pci_next_error(phb->opal_id,
1468 &frozen_pe_no, &err_type, &severity);
1469 if (rc != OPAL_SUCCESS) {
1470 pr_devel("%s: Invalid return value on "
1471 "PHB#%x (0x%lx) from opal_pci_next_error",
1472 __func__, hose->global_number, rc);
1476 /* If the PHB doesn't have error, stop processing */
1477 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
1478 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
1479 pr_devel("%s: No error found on PHB#%x\n",
1480 __func__, hose->global_number);
 * Process the error. When multiple errors are present on
 * the PHB, we expect the one with the highest priority
 * to be reported.
1489 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1490 __func__, be16_to_cpu(err_type),
1491 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
1492 hose->global_number);
1493 switch (be16_to_cpu(err_type)) {
1494 case OPAL_EEH_IOC_ERROR:
1495 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1496 pr_err("EEH: dead IOC detected\n");
1497 ret = EEH_NEXT_ERR_DEAD_IOC;
1498 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1499 pr_info("EEH: IOC informative error "
1501 pnv_eeh_get_and_dump_hub_diag(hose);
1502 ret = EEH_NEXT_ERR_NONE;
1506 case OPAL_EEH_PHB_ERROR:
1507 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1509 pr_err("EEH: dead PHB#%x detected, "
1511 hose->global_number,
1512 eeh_pe_loc_get(phb_pe));
1513 ret = EEH_NEXT_ERR_DEAD_PHB;
1514 } else if (be16_to_cpu(severity) ==
1515 OPAL_EEH_SEV_PHB_FENCED) {
1517 pr_err("EEH: Fenced PHB#%x detected, "
1519 hose->global_number,
1520 eeh_pe_loc_get(phb_pe));
1521 ret = EEH_NEXT_ERR_FENCED_PHB;
1522 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1523 pr_info("EEH: PHB#%x informative error "
1524 "detected, location: %s\n",
1525 hose->global_number,
1526 eeh_pe_loc_get(phb_pe));
1527 pnv_eeh_get_phb_diag(phb_pe);
1528 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1529 ret = EEH_NEXT_ERR_NONE;
1533 case OPAL_EEH_PE_ERROR:
1535 * If we can't find the corresponding PE, we
1536 * just try to unfreeze.
1538 if (pnv_eeh_get_pe(hose,
1539 be64_to_cpu(frozen_pe_no), pe)) {
1540 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1541 hose->global_number, be64_to_cpu(frozen_pe_no));
1542 pr_info("EEH: PHB location: %s\n",
1543 eeh_pe_loc_get(phb_pe));
1545 /* Dump PHB diag-data */
1546 rc = opal_pci_get_phb_diag_data2(phb->opal_id,
1547 phb->diag_data, phb->diag_data_size);
1548 if (rc == OPAL_SUCCESS)
1549 pnv_pci_dump_phb_diag_data(hose,
1552 /* Try best to clear it */
1553 opal_pci_eeh_freeze_clear(phb->opal_id,
1554 be64_to_cpu(frozen_pe_no),
1555 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1556 ret = EEH_NEXT_ERR_NONE;
1557 } else if ((*pe)->state & EEH_PE_ISOLATED ||
1558 eeh_pe_passed(*pe)) {
1559 ret = EEH_NEXT_ERR_NONE;
1561 pr_err("EEH: Frozen PE#%x "
1562 "on PHB#%x detected\n",
1564 (*pe)->phb->global_number);
1565 pr_err("EEH: PE location: %s, "
1566 "PHB location: %s\n",
1567 eeh_pe_loc_get(*pe),
1568 eeh_pe_loc_get(phb_pe));
1569 ret = EEH_NEXT_ERR_FROZEN_PE;
1574 pr_warn("%s: Unexpected error type %d\n",
1575 __func__, be16_to_cpu(err_type));
 * The EEH core will try to recover from a fenced PHB or
 * a frozen PE. For a frozen PE, the EEH core enables the
 * I/O path before collecting logs, but that destroys the
 * error site. So we have to dump the log in advance here.
1585 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
1586 ret == EEH_NEXT_ERR_FENCED_PHB) &&
1587 !((*pe)->state & EEH_PE_ISOLATED)) {
1588 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1589 pnv_eeh_get_phb_diag(*pe);
1591 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1592 pnv_pci_dump_phb_diag_data((*pe)->phb,
 * We probably have a frozen parent PE somewhere, and we
 * need to handle that frozen parent PE first.
1600 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1601 parent_pe = (*pe)->parent;
1603 /* Hit the ceiling ? */
1604 if (parent_pe->type & EEH_PE_PHB)
1607 /* Frozen parent PE ? */
1608 state = eeh_ops->get_state(parent_pe, NULL);
1610 (state & active_flags) != active_flags)
1613 /* Next parent level */
1614 parent_pe = parent_pe->parent;
1617 /* We possibly migrate to another PE */
1618 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
 * If we have no errors on the specific PHB, or only an
 * informative error there, we continue poking it.
 * Otherwise, we need actions to be taken by upper layers.
1627 if (ret > EEH_NEXT_ERR_INF)
1631 /* Unmask the event */
1632 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1633 enable_irq(eeh_event_irq);
1638 static int pnv_eeh_restore_vf_config(struct pci_dn *pdn)
1640 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1641 u32 devctl, cmd, cap2, aer_capctl;
1644 if (edev->pcie_cap) {
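/*
 * pdn->mps holds the payload size in bytes (a power of two); the
 * DEVCTL payload field expects log2(mps) - 7 in bits 7:5, which is
 * what the ffs()/shift below computes.
 */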
1646 old_mps = (ffs(pdn->mps) - 8) << 5;
1647 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1649 devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
1651 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1654 /* Disable Completion Timeout */
1655 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
1658 eeh_ops->read_config(pdn,
1659 edev->pcie_cap + PCI_EXP_DEVCTL2,
1662 eeh_ops->write_config(pdn,
1663 edev->pcie_cap + PCI_EXP_DEVCTL2,
1668 /* Enable SERR and parity checking */
1669 eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
1670 cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1671 eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
/* Enable reporting of various errors */
1674 if (edev->pcie_cap) {
1675 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1677 devctl &= ~PCI_EXP_DEVCTL_CERE;
1678 devctl |= (PCI_EXP_DEVCTL_NFERE |
1679 PCI_EXP_DEVCTL_FERE |
1680 PCI_EXP_DEVCTL_URRE);
1681 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
1685 /* Enable ECRC generation and check */
1686 if (edev->pcie_cap && edev->aer_cap) {
1687 eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1689 aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
1690 eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
1697 static int pnv_eeh_restore_config(struct pci_dn *pdn)
1699 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1700 struct pnv_phb *phb;
1702 int config_addr = (pdn->busno << 8) | (pdn->devfn);
1708 * We have to restore the PCI config space after reset since the
1709 * firmware can't see SRIOV VFs.
 * FIXME: The MPS, error routing rules and timeout settings are
 * worth exporting from the firmware in an extensible way.
1715 ret = pnv_eeh_restore_vf_config(pdn);
1717 phb = pdn->phb->private_data;
1718 ret = opal_pci_reinit(phb->opal_id,
1719 OPAL_REINIT_PCI_DEV, config_addr);
1723 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
1724 __func__, config_addr, ret);
1731 static struct eeh_ops pnv_eeh_ops = {
1733 .init = pnv_eeh_init,
1734 .post_init = pnv_eeh_post_init,
1735 .probe = pnv_eeh_probe,
1736 .set_option = pnv_eeh_set_option,
1737 .get_pe_addr = pnv_eeh_get_pe_addr,
1738 .get_state = pnv_eeh_get_state,
1739 .reset = pnv_eeh_reset,
1740 .wait_state = pnv_eeh_wait_state,
1741 .get_log = pnv_eeh_get_log,
1742 .configure_bridge = pnv_eeh_configure_bridge,
1743 .err_inject = pnv_eeh_err_inject,
1744 .read_config = pnv_eeh_read_config,
1745 .write_config = pnv_eeh_write_config,
1746 .next_error = pnv_eeh_next_error,
1747 .restore_config = pnv_eeh_restore_config
1750 void pcibios_bus_add_device(struct pci_dev *pdev)
1752 struct pci_dn *pdn = pci_get_pdn(pdev);
1754 if (!pdev->is_virtfn)
 * The following operations will fail if the VF's sysfs files
 * haven't been created or its resources haven't been finalized.
1761 eeh_add_device_early(pdn);
1762 eeh_add_device_late(pdev);
1763 eeh_sysfs_add_device(pdev);
1766 #ifdef CONFIG_PCI_IOV
1767 static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
1769 struct pci_dn *pdn = pci_get_pdn(pdev);
1772 if (!pdev->is_virtfn)
1775 /* Synchronize MPS for VF and PF */
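/*
 * pdev->pcie_mpss encodes the VF's maximum supported payload size as
 * 128 << pcie_mpss, so only adopt the PF's MPS when the VF can
 * actually support it.
 */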
1776 parent_mps = pcie_get_mps(pdev->physfn);
1777 if ((128 << pdev->pcie_mpss) >= parent_mps)
1778 pcie_set_mps(pdev, parent_mps);
1779 pdn->mps = pcie_get_mps(pdev);
1781 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
1782 #endif /* CONFIG_PCI_IOV */
1785 * eeh_powernv_init - Register platform dependent EEH operations
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH-related functions.
1790 static int __init eeh_powernv_init(void)
1794 ret = eeh_ops_register(&pnv_eeh_ops);
1796 pr_info("EEH: PowerNV platform initialized\n");
1798 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
1802 machine_early_initcall(powernv, eeh_powernv_init);