2 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3 * Copyright IBM Corp. 2004 2005
4 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
16 * NON INFRINGEMENT. See the GNU General Public License for more
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
38 struct list_head edev_list;
43 * eeh_pcid_name - Retrieve name of PCI device driver
46 * This routine is used to retrieve the name of PCI device driver
/* Return the name of the driver bound to @pdev.
 * NOTE(review): the NULL-return path for an unbound device is not visible
 * in this fragment — confirm against the full source. */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
	/* Only dereference when both the device and a bound driver exist. */
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
57 * eeh_pcid_get - Get the PCI device driver
60 * The function is used to retrieve the PCI device driver for
61 * the indicated PCI device. Besides, we will increase the reference
62 * of the PCI device driver to prevent that being unloaded on
63 * the fly. Otherwise, kernel crash would be seen.
/* Pin the driver bound to @pdev so it cannot be unloaded while EEH
 * recovery callbacks are in flight.  Balanced by eeh_pcid_put(). */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
	/* No device or no bound driver: nothing to pin. */
	if (!pdev || !pdev->driver)
	/* Take a module reference; failure means the driver is going away. */
	if (!try_module_get(pdev->driver->driver.owner))
77 * eeh_pcid_put - Dereference on the PCI device driver
80 * The function is called to do dereference on the PCI device
81 * driver of the indicated PCI device.
/* Release the module reference taken by eeh_pcid_get(). */
static inline void eeh_pcid_put(struct pci_dev *pdev)
	/* Nothing was pinned if there is no device or bound driver. */
	if (!pdev || !pdev->driver)
	module_put(pdev->driver->driver.owner);
92 * eeh_disable_irq - Disable interrupt for the recovering device
95 * This routine must be called when reporting temporary or permanent
96 * error to the particular PCI device to disable interrupt of that
97 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
98 * do real work because EEH should freeze DMA transfers for those PCI
99 * devices encountering EEH errors, which includes MSI or MSI-X.
/* Disable the legacy (LSI) interrupt of a device entering recovery and
 * record the fact in edev->mode so eeh_enable_irq() can balance it. */
static void eeh_disable_irq(struct pci_dev *dev)
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)

	/* Skip IRQs that have no registered action/handler. */
	if (!irq_has_action(dev->irq))

	/* Flag the disable so the later enable stays balanced. */
	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
120 * eeh_enable_irq - Enable interrupt for the recovering device
123 * This routine must be called to enable interrupt while failed
124 * device could be resumed.
/* Re-enable the device interrupt previously turned off by
 * eeh_disable_irq(), but only if we actually disabled it. */
static void eeh_enable_irq(struct pci_dev *dev)
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * enable_irq only when the descriptor is disabled.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information.
		 *
		 * I so wish that the assymetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 */
		/* Guard against an unbalanced enable (see rant above). */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
/* True when the EEH device is gone (NULL) or has been marked
 * permanently removed — recovery callbacks must skip such devices. */
static bool eeh_dev_removed(struct eeh_dev *edev)
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
/* Per-device traversal callback: save PCI config space before a reset so
 * eeh_dev_restore_state() can bring it back afterwards.
 * @data: struct eeh_dev * being visited; @userdata: unused. */
static void *eeh_dev_save_state(void *data, void *userdata)
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	/*
	 * We cannot access the config space on some adapters.
	 * Otherwise, it will cause fenced PHB. We don't save
	 * the content in their config space and will restore
	 * from the initial config space saved when the EEH
	 * device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))

	pdev = eeh_dev_to_pci_dev(edev);

	pci_save_state(pdev);
193 * eeh_report_error - Report pci error to each device driver
195 * @userdata: return value
197 * Report an EEH error to each device driver, collect up and
198 * merge the device driver responses. Cumulative response
199 * passed back in "userdata".
/* Traversal callback: tell one driver its device hit a frozen-channel
 * error, and merge its pci_ers_result into *userdata.
 * NOTE(review): the out/out_no_dev labels targeted below are outside this
 * fragment. */
static void *eeh_report_error(void *data, void *userdata)
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE is passed to a guest. */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	eeh_disable_irq(dev);

	/* Drivers without an error_detected() hook get no callback. */
	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	/* Remember the error so eeh_report_resume() calls resume() later. */
	edev->in_error = true;
	pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);

	device_unlock(&dev->dev);
240 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
242 * @userdata: return value
244 * Tells each device driver that IO ports, MMIO and config space I/O
245 * are now enabled. Collects up and merges the device driver responses.
246 * Cumulative response passed back in "userdata".
/* Traversal callback: tell one driver that MMIO/config access is back,
 * and merge its pci_ers_result into *userdata. */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	/* Skip removed devices and PEs passed through to a guest. */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))

	device_lock(&dev->dev);
	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	/* No hook, or the device was hotplug-handled (NO_HANDLER): skip. */
	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER))

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	device_unlock(&dev->dev);
281 * eeh_report_reset - Tell device that slot has been reset
283 * @userdata: return value
285 * This routine must be called while EEH tries to reset particular
286 * PCI device so that the associated PCI device driver could take
287 * some actions, usually to save data the driver needs so that the
288 * driver can work again while the device is recovered.
/* Traversal callback: notify one driver that the slot has been reset via
 * its slot_reset() hook, merging the result into *userdata. */
static void *eeh_report_reset(void *data, void *userdata)
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))

	device_lock(&dev->dev);
	/* Channel is healthy again after the reset. */
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER) ||

	rc = driver->err_handler->slot_reset(dev);
	/* First answer wins unless a later driver demands another reset. */
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

	device_unlock(&dev->dev);
/* Per-device traversal callback: restore the PCI config space saved by
 * eeh_dev_save_state() (or the initial BAR snapshot for restricted
 * adapters) after the PE reset completes. */
static void *eeh_dev_restore_state(void *data, void *userdata)
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	/*
	 * The content in the config space isn't saved because
	 * the blocked config space on some adapters. We have
	 * to restore the initial saved config space when the
	 * EEH device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		/* Restore BARs once, when visiting the last device of the PE. */
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

	pdev = eeh_dev_to_pci_dev(edev);

	pci_restore_state(pdev);
357 * eeh_report_resume - Tell device to resume normal operations
359 * @userdata: return value
361 * This routine must be called to notify the device driver that it
362 * could resume so that the device driver can do some initialization
363 * to make the recovered device work again.
/* Traversal callback: tell one driver it may resume normal operation,
 * via its resume() hook; emits a RECOVERED uevent on success. */
static void *eeh_report_resume(void *data, void *userdata)
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	/* Only devices that saw error_detected() get a resume() call. */
	was_in_error = edev->in_error;
	edev->in_error = false;

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
		/* Clear the hotplug marker now that recovery is done. */
		edev->mode &= ~EEH_DEV_NO_HANDLER;

	driver->err_handler->resume(dev);

	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);

#ifdef CONFIG_PCI_IOV
	/* Give the platform (e.g. pseries SR-IOV) a resume notification. */
	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));

	device_unlock(&dev->dev);
407 * eeh_report_failure - Tell device driver that device is dead.
409 * @userdata: return value
411 * This informs the device driver that the device is permanently
412 * dead, and that no further recovery attempts will be made on it.
/* Traversal callback: tell one driver its device is permanently dead
 * (pci_channel_io_perm_failure); no further recovery will be attempted. */
static void *eeh_report_failure(void *data, void *userdata)
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver) goto out_no_dev;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)

	/* Return value intentionally ignored: the device is gone anyway. */
	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);

	device_unlock(&dev->dev);
/* Traversal callback: re-add a previously removed SR-IOV VF after its PF
 * has recovered.  Only meaningful for VF EEH devices (edev->physfn set). */
static void *eeh_add_virt_device(void *data, void *userdata)
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/* Refuse non-VF devices: this path only replugs virtual functions. */
	if (!(edev->physfn)) {
		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
			__func__, pdn->phb->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));

	driver = eeh_pcid_get(dev);
	/* A driver with err_handler hooks kept the device bound; skip replug. */
	if (driver->err_handler)

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
/* Traversal callback: remove a device whose driver is not EEH-aware so it
 * gets a clean hotplug re-add after the reset.  @userdata is an optional
 * struct eeh_rmv_data collecting removed VFs and a removal count. */
static void *eeh_rmv_device(void *data, void *userdata)
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
	int *removed = rmv_data ? &rmv_data->removed : NULL;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that's lots of complexity to do that,
	 * particularly some of devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))

	/*
	 * We rely on count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might have the permanently
	 * offlined PEs attached, but we needn't take care of
	 * them and their child devices.
	 */
	if (eeh_dev_removed(edev))

	driver = eeh_pcid_get(dev);
	/* PEs passed through to a guest are the guest's problem. */
	eeh_pe_passed(edev->pe))
	/* An EEH-aware driver (error_detected + slot_reset) stays bound. */
	driver->err_handler &&
	driver->err_handler->error_detected &&
	driver->err_handler->slot_reset)

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
	/* Remember the bus and mark disconnected for later re-binding. */
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;

#ifdef CONFIG_PCI_IOV
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);

	/*
	 * We have to set the VF PE number to invalid one, which is
	 * required to plug the VF successfully.
	 */
	pdn->pe_number = IODA_INVALID_PE;

	/* Queue the VF so eeh_handle_normal_event() can re-add it later. */
	list_add(&edev->rmv_list, &rmv_data->edev_list);

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();
/* PE traversal callback: detach all DISCONNECTED devices from their
 * parent PE so the binding can be rebuilt when devices are re-added. */
static void *eeh_pe_detach_dev(void *data, void *userdata)
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		/* Only devices removed by eeh_rmv_device() need detaching. */
		if (!(edev->mode & EEH_DEV_DISCONNECTED))

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
562 * Explicitly clear PE's frozen state for PowerNV where
563 * we have frozen PE until BAR restore is completed. It's
564 * harmless to clear it for pSeries. To be consistent with
565 * PE reset (for 3 times), we try to clear the frozen state
566 * for 3 times as well.
/* PE traversal callback: try up to 3 times to thaw (unfreeze) one PE.
 * @flag points to a bool telling eeh_unfreeze_pe() whether to also clear
 * software state.  Logs and stops on persistent failure. */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool clear_sw_state = *(bool *)flag;

	/* Retry: mirrors the 3-attempt policy used for PE reset. */
	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
		__func__, rc, pe->phb->global_number, pe->addr);
/* Thaw @pe and all of its children, then drop the ISOLATED flag.
 * Returns 0 on success, -EIO if any PE could not be unfrozen. */
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
/* Full reset-and-recover sequence for a single PE, used outside the
 * normal event path: save device state, hard-reset, thaw, restore.
 * Returns 0 on success or a negative error from the reset/unfreeze. */
int eeh_pe_reset_and_recover(struct eeh_pe *pe)

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Snapshot config space of every device under the PE. */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	ret = eeh_pe_reset_full(pe);
	/* On failure, leave recovery mode before returning. */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
637 * eeh_reset_device - Perform actual reset of a pci slot
638 * @driver_eeh_aware: Does the device's driver provide EEH support?
640 * @bus: PCI bus corresponding to the isolcated slot
641 * @rmv_data: Optional, list to record removed devices
643 * This routine must be called to do reset on the indicated PE.
644 * During the reset, udev might be invoked because those affected
645 * PCI devices will be removed and then added.
/* Perform the actual reset of a PCI slot/PE.
 * @pe: PE to reset; @bus: its PCI bus; @rmv_data: optional list collecting
 * removed devices; @driver_eeh_aware: true when every driver on the PE
 * handles EEH itself (only unaware devices get hot-unplugged then). */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
	struct eeh_dev *edev;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwords. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		/* Remove only the devices whose drivers are not EEH-aware. */
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe);

	pci_lock_rescan_remove();

	/* Re-program bridge windows and device BARs post-reset. */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	pci_unlock_rescan_remove();

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev, NULL);
		/* Non-VF case: rescan the whole bus to re-add devices. */
		if (!driver_eeh_aware)
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		pci_hp_add_devices(bus);

	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	/* Restore the freeze counter cleared by pcibios during re-add. */
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300
741 * eeh_handle_normal_event - Handle EEH events on a specific PE
742 * @pe: EEH PE - which should not be used after we return, as it may
743 * have been invalidated.
745 * Attempts to recover the given PE. If recovery fails or the PE has failed
746 * too many times, remove the PE.
748 * While PHB detects address or data parity errors on particular PCI
749 * slot, the associated PE will be frozen. Besides, DMA's occurring
750 * to wild addresses (which usually happen due to bugs in device
751 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
752 * #PERR or other misc PCI-related errors also can trigger EEH errors.
754 * Recovery process consists of unplugging the device driver (which
755 * generated hotplug events to userspace), then issuing a PCI #RST to
756 * the device, then reconfiguring the PCI config space for all bridges
757 * & devices under this slot, and then finally restarting the device
758 * drivers (which cause a second set of hotplug events to go out to
/* Main recovery state machine for one frozen PE: notify drivers, thaw
 * MMIO/DMA or reset as the merged driver responses dictate, then resume
 * drivers — or, on failure, permanently remove the PE's devices.
 * @pe must not be used after this returns; it may have been invalidated. */
void eeh_handle_normal_event(struct eeh_pe *pe)
	struct eeh_dev *edev, *tmp;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};

	bus = eeh_pe_bus_get(pe);
	pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
	       __func__, pe->phb->global_number, pe->addr);

	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	eeh_pe_update_time_stamp(pe);

	/* Too many freezes within the window: give up on this PE for good. */
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
		       "last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to have partially
	 * hotplug for this case.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
	/* A fenced PHB always needs a reset regardless of driver answers. */
	if ((pe->type & EEH_PE_PHB) &&
	    result != PCI_ERS_RESULT_NONE &&
	    result != PCI_ERS_RESULT_NEED_RESET)
		result = PCI_ERS_RESULT_NEED_RESET;

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicing the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		pr_warn("%s: Unable to reset, err=%d\n",

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		result = PCI_ERS_RESULT_NEED_RESET;
		pr_info("EEH: Notify device drivers to resume I/O\n");
		eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		result = PCI_ERS_RESULT_NEED_RESET;
		/*
		 * We didn't do PE reset for the case. The PE
		 * is still in frozen state. Clear it before
		 * resuming the PE.
		 */
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
		result = PCI_ERS_RESULT_RECOVERED;

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		pr_warn("%s: Cannot reset, err=%d\n",

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");

	/*
	 * For those hot removed VFs, we should add back them after PF get
	 * recovered properly.
	 */
	list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
		eeh_add_virt_device(edev, NULL);
		list_del(&edev->rmv_list);

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid access
	 * the their PCI config any more.
	 */
	if (pe->type & EEH_PE_VF) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
		/* Non-VF branch: drop PRI_BUS then hot-remove the bus. */
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
		/* The passed PE should no longer be used */

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
957 * eeh_handle_special_event - Handle EEH events without a specific failing PE
959 * Called when an EEH event is detected but can't be narrowed down to a
960 * specific PE. Iterates through possible failures and handles them as
/* Handle EEH events that cannot be attributed to a specific PE: poll
 * eeh_ops->next_error() in a loop, marking dead IOCs/PHBs isolated and
 * recovering fenced PHBs / frozen PEs via eeh_handle_normal_event(). */
void eeh_handle_special_event(void)
	struct eeh_pe *pe, *phb_pe;
	struct pci_controller *hose;

	rc = eeh_ops->next_error(&pe);

	case EEH_NEXT_ERR_DEAD_IOC:
		/* Mark all PHBs in dead state */
		eeh_serialize_lock(&flags);

		/* Purge all events */
		eeh_remove_event(NULL, true);

		list_for_each_entry(hose, &hose_list, list_node) {
			phb_pe = eeh_phb_pe_get(hose);
			if (!phb_pe) continue;

			eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);

		eeh_serialize_unlock(flags);

	case EEH_NEXT_ERR_FROZEN_PE:
	case EEH_NEXT_ERR_FENCED_PHB:
	case EEH_NEXT_ERR_DEAD_PHB:
		/* Mark the PE in fenced state */
		eeh_serialize_lock(&flags);

		/* Purge all events of the PHB */
		eeh_remove_event(pe, true);

		/* Dead PHBs are only isolated; others are also recovering. */
		if (rc == EEH_NEXT_ERR_DEAD_PHB)
			eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		eeh_pe_state_mark(pe,
				  EEH_PE_ISOLATED | EEH_PE_RECOVERING);

		eeh_serialize_unlock(flags);

	case EEH_NEXT_ERR_NONE:
		pr_warn("%s: Invalid value %d from next_error()\n",

	/*
	 * For fenced PHB and frozen PE, it's handled as normal
	 * event. We have to remove the affected PHBs for dead
	 * IOC/PHB cases.
	 */
	if (rc == EEH_NEXT_ERR_FROZEN_PE ||
	    rc == EEH_NEXT_ERR_FENCED_PHB) {
		eeh_handle_normal_event(pe);

		pci_lock_rescan_remove();
		list_for_each_entry(hose, &hose_list, list_node) {
			phb_pe = eeh_phb_pe_get(hose);
			/* Skip non-isolated or already-recovering PHBs. */
			!(phb_pe->state & EEH_PE_ISOLATED) ||
			(phb_pe->state & EEH_PE_RECOVERING))

			/* Notify all devices to be down */
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			eeh_pe_dev_traverse(pe,
					    eeh_report_failure, NULL);
			bus = eeh_pe_bus_get(phb_pe);
			pr_err("%s: Cannot find PCI bus for "
			       pe->phb->global_number,

			pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();

	/*
	 * If we have detected dead IOC, we needn't proceed
	 * any more since all PHBs would have been removed.
	 */
	if (rc == EEH_NEXT_ERR_DEAD_IOC)
	} while (rc != EEH_NEXT_ERR_NONE);