830526e72cce270fb6218db50171bc75835cc3b6
[linux-2.6-microblaze.git] / arch / powerpc / platforms / powernv / eeh-powernv.c
1 /*
2  * The file intends to implement the platform dependent EEH operations on
 * powernv platform. Actually, the powernv platform was created in order
 * to fully rely on hypervisor support.
5  *
6  * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/atomic.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/list.h>
21 #include <linux/msi.h>
22 #include <linux/of.h>
23 #include <linux/pci.h>
24 #include <linux/proc_fs.h>
25 #include <linux/rbtree.h>
26 #include <linux/sched.h>
27 #include <linux/seq_file.h>
28 #include <linux/spinlock.h>
29
30 #include <asm/eeh.h>
31 #include <asm/eeh_event.h>
32 #include <asm/firmware.h>
33 #include <asm/io.h>
34 #include <asm/iommu.h>
35 #include <asm/machdep.h>
36 #include <asm/msi_bitmap.h>
37 #include <asm/opal.h>
38 #include <asm/ppc-pci.h>
39
40 #include "powernv.h"
41 #include "pci.h"
42
/* Set once the OPAL PCI-error event interrupt has been requested */
static bool pnv_eeh_nb_init = false;
/* Virq for OPAL_EVENT_PCI_ERROR; -EINVAL until successfully requested */
static int eeh_event_irq = -EINVAL;
45
/*
 * pnv_eeh_init - Platform dependent EEH initialization
 *
 * Require OPAL firmware, select the device-based probe mode and derive
 * platform-wide EEH flags from the first PHB in the system. Returns 0
 * on success or -EINVAL when OPAL isn't available.
 */
static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	/* EEH on powernv is implemented entirely on top of OPAL calls */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required !\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode: probe from PCI devices rather than device tree */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PE, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by EEH core
		 * if it's not the reserved one. Currently, we
		 * have the reserved PE#255 and PE#127 for PHB3
		 * and P7IOC separately. So we should regard
		 * PE#0 as valid for PHB3 and P7IOC.
		 */
		if (phb->ioda.reserved_pe != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		/*
		 * Only the first PHB is examined; all PHBs in the
		 * system are assumed to share the same model.
		 */
		break;
	}

	return 0;
}
86
/*
 * pnv_eeh_event - Interrupt handler for the OPAL PCI-error event
 * @irq: interrupt number
 * @data: unused cookie
 *
 * Masks itself before queueing a "check everything" EEH event so that
 * further error interrupts are ignored until the current batch has
 * been processed.
 */
static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	/* NULL PE means "scan all PHBs for frozen PEs" */
	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}
102
103 #ifdef CONFIG_DEBUG_FS
104 static ssize_t pnv_eeh_ei_write(struct file *filp,
105                                 const char __user *user_buf,
106                                 size_t count, loff_t *ppos)
107 {
108         struct pci_controller *hose = filp->private_data;
109         struct eeh_dev *edev;
110         struct eeh_pe *pe;
111         int pe_no, type, func;
112         unsigned long addr, mask;
113         char buf[50];
114         int ret;
115
116         if (!eeh_ops || !eeh_ops->err_inject)
117                 return -ENXIO;
118
119         /* Copy over argument buffer */
120         ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
121         if (!ret)
122                 return -EFAULT;
123
124         /* Retrieve parameters */
125         ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
126                      &pe_no, &type, &func, &addr, &mask);
127         if (ret != 5)
128                 return -EINVAL;
129
130         /* Retrieve PE */
131         edev = kzalloc(sizeof(*edev), GFP_KERNEL);
132         if (!edev)
133                 return -ENOMEM;
134         edev->phb = hose;
135         edev->pe_config_addr = pe_no;
136         pe = eeh_pe_get(edev);
137         kfree(edev);
138         if (!pe)
139                 return -ENODEV;
140
141         /* Do error injection */
142         ret = eeh_ops->err_inject(pe, type, func, addr, mask);
143         return ret < 0 ? ret : count;
144 }
145
/* debugfs "err_injct" file: write-only trigger for EEH error injection */
static const struct file_operations pnv_eeh_ei_fops = {
	.open   = simple_open,
	.llseek = no_llseek,
	.write  = pnv_eeh_ei_write,
};
151
/*
 * Write @val to the PHB register at @offset via big-endian MMIO.
 * @data is the pci_controller the debugfs file was created with.
 */
static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}
160
/*
 * Read the PHB register at @offset via big-endian MMIO into *@val.
 * @data is the pci_controller the debugfs file was created with.
 */
static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}
169
/*
 * Generate a DEFINE_SIMPLE_ATTRIBUTE ops structure exposing the PHB
 * register at @reg as a read/write debugfs attribute named after @name.
 */
#define PNV_EEH_DBGFS_ENTRY(name, reg)                          \
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)        \
{                                                               \
	return pnv_eeh_dbgfs_set(data, reg, val);               \
}                                                               \
								\
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)       \
{                                                               \
	return pnv_eeh_dbgfs_get(data, reg, val);               \
}                                                               \
								\
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,               \
			pnv_eeh_dbgfs_get_##name,               \
			pnv_eeh_dbgfs_set_##name,               \
			"0x%llx\n")

/* Error-injection registers: outbound, inbound A and inbound B */
PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
189
190 #endif /* CONFIG_DEBUG_FS */
191
192 /**
193  * pnv_eeh_post_init - EEH platform dependent post initialization
194  *
195  * EEH platform dependent post initialization on powernv. When
196  * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache machinery has been built, EEH is
198  * ready to supply service.
199  */
static int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Register OPAL event notifier: performed only once per boot */
	if (!pnv_eeh_nb_init) {
		eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
		if (eeh_event_irq < 0) {
			pr_err("%s: Can't register OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return eeh_event_irq;
		}

		ret = request_irq(eeh_event_irq, pnv_eeh_event,
				IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
		if (ret < 0) {
			/* Undo the mapping created by opal_event_request() */
			irq_dispose_mapping(eeh_event_irq);
			pr_err("%s: Can't request OPAL event interrupt (%d)\n",
			       __func__, eeh_event_irq);
			return ret;
		}

		pnv_eeh_nb_init = true;
	}

	/* Keep the error interrupt masked until EEH is actually enabled */
	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we restore to conventional mechanism
		 * to clear frozen PE during PCI config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;

		/* Create debugfs entries once per PHB, if it has a dbgfs dir */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}
267
/*
 * pnv_eeh_find_cap - Find a standard PCI capability
 * @pdn: PCI device node
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Walk the device's standard capability list in config space and
 * return the offset of @cap, or 0 if absent. The walk is bounded to
 * 48 entries to defend against malformed/looping lists.
 *
 * NOTE(review): config-space read failures are not checked here;
 * presumably a failed read yields a value that terminates the walk —
 * TODO confirm against pnv_pci_cfg_read() semantics.
 */
static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48;   /* Maximal number of capabilities */
	u32 status, id;

	if (!pdn)
		return 0;

	/* Check if the device supports capabilities */
	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	while (cnt--) {
		/* Follow the "next capability" pointer */
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		/* Pointers below 0x40 land in the standard header: stop */
		if (pos < 0x40)
			break;

		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		/* 0xff means the read hit a non-existent device */
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}
302
/*
 * pnv_eeh_find_ecap - Find a PCIe extended capability
 * @pdn: PCI device node
 * @cap: extended capability ID (PCI_EXT_CAP_ID_*)
 *
 * Walk the extended capability list starting at config offset 256 and
 * return the offset of @cap, or 0 if absent or the device has no PCIe
 * capability. The ttl bound assumes minimal 8-byte capability entries,
 * matching the generic PCI core's loop limit.
 */
static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	/* Extended capabilities only exist on PCIe devices */
	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		/* A "next" pointer below 256 terminates the list */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
330
331 /**
332  * pnv_eeh_probe - Do probe on PCI device
333  * @pdn: PCI device node
334  * @data: unused
335  *
336  * When EEH module is installed during system boot, all PCI devices
337  * are checked one by one to see if it supports EEH. The function
338  * is introduced for the purpose. By default, EEH has been enabled
339  * on all PCI devices. That's to say, we only need do necessary
340  * initialization on the corresponding eeh device and create PE
341  * accordingly.
342  *
 * It's notable that it's unsafe to retrieve the EEH device through
 * the corresponding PCI device. During PCI device hotplug, which
 * was possibly triggered by the EEH core, the binding between EEH device
346  * and the PCI device isn't built yet.
347  */
348 static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
349 {
350         struct pci_controller *hose = pdn->phb;
351         struct pnv_phb *phb = hose->private_data;
352         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
353         uint32_t pcie_flags;
354         int ret;
355
356         /*
357          * When probing the root bridge, which doesn't have any
358          * subordinate PCI devices. We don't have OF node for
359          * the root bridge. So it's not reasonable to continue
360          * the probing.
361          */
362         if (!edev || edev->pe)
363                 return NULL;
364
365         /* Skip for PCI-ISA bridge */
366         if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
367                 return NULL;
368
369         /* Initialize eeh device */
370         edev->class_code = pdn->class_code;
371         edev->mode      &= 0xFFFFFF00;
372         edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
373         edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
374         edev->aer_cap  = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
375         if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
376                 edev->mode |= EEH_DEV_BRIDGE;
377                 if (edev->pcie_cap) {
378                         pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
379                                          2, &pcie_flags);
380                         pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
381                         if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
382                                 edev->mode |= EEH_DEV_ROOT_PORT;
383                         else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
384                                 edev->mode |= EEH_DEV_DS_PORT;
385                 }
386         }
387
388         edev->config_addr    = (pdn->busno << 8) | (pdn->devfn);
389         edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
390
391         /* Create PE */
392         ret = eeh_add_to_parent_pe(edev);
393         if (ret) {
394                 pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
395                         __func__, hose->global_number, pdn->busno,
396                         PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
397                 return NULL;
398         }
399
400         /*
401          * If the PE contains any one of following adapters, the
402          * PCI config space can't be accessed when dumping EEH log.
403          * Otherwise, we will run into fenced PHB caused by shortage
404          * of outbound credits in the adapter. The PCI config access
405          * should be blocked until PE reset. MMIO access is dropped
406          * by hardware certainly. In order to drop PCI config requests,
407          * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
408          * will be checked in the backend for PE state retrival. If
409          * the PE becomes frozen for the first time and the flag has
410          * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
411          * that PE to block its config space.
412          *
413          * Broadcom Austin 4-ports NICs (14e4:1657)
414          * Broadcom Shiner 4-ports 1G NICs (14e4:168a)
415          * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
416          */
417         if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
418              pdn->device_id == 0x1657) ||
419             (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
420              pdn->device_id == 0x168a) ||
421             (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
422              pdn->device_id == 0x168e))
423                 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
424
425         /*
426          * Cache the PE primary bus, which can't be fetched when
427          * full hotplug is in progress. In that case, all child
428          * PCI devices of the PE are expected to be removed prior
429          * to PE reset.
430          */
431         if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
432                 edev->pe->bus = pci_find_bus(hose->global_number,
433                                              pdn->busno);
434                 if (edev->pe->bus)
435                         edev->pe->state |= EEH_PE_PRI_BUS;
436         }
437
438         /*
439          * Enable EEH explicitly so that we will do EEH check
440          * while accessing I/O stuff
441          */
442         eeh_add_flag(EEH_ENABLED);
443
444         /* Save memory bars */
445         eeh_save_bars(edev);
446
447         return NULL;
448 }
449
450 /**
451  * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
452  * @pe: EEH PE
453  * @option: operation to be issued
454  *
455  * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
457  * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
458  */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	/* Map the EEH option onto the corresponding OPAL action */
	switch (option) {
	case EEH_OPT_DISABLE:
		/* EEH can't be disabled on this platform */
		return -EPERM;
	case EEH_OPT_ENABLE:
		/* EEH is always on; nothing to do */
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}
519
520 /**
521  * pnv_eeh_get_pe_addr - Retrieve PE address
522  * @pe: EEH PE
523  *
 * Retrieve the PE address according to the given traditional
525  * PCI BDF (Bus/Device/Function) address.
526  */
static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
	/* The PE address was assigned at probe time; just return it */
	return pe->addr;
}
531
/*
 * Fetch the PHB diagnostic data for @pe's PHB from OPAL into the
 * PE's diag buffer (pe->data, PNV_PCI_DIAG_BUF_SIZE bytes). Failures
 * are logged but otherwise ignored — the buffer is best-effort.
 */
static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 PNV_PCI_DIAG_BUF_SIZE);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}
543
/*
 * pnv_eeh_get_phb_state - Retrieve the state of a PHB-type PE
 * @pe: PHB PE
 *
 * Query OPAL for the PHB's freeze status. Returns the fully-functional
 * EEH state mask when the PHB isn't in error, 0 once it has been marked
 * isolated, or EEH_STATE_NOT_SUPPORT when the OPAL call fails.
 */
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check PHB state. If the PHB is frozen for the
	 * first time, to dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		/* First observation of the error: isolate and grab diag data */
		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
582
/*
 * pnv_eeh_get_pe_state - Retrieve the state of an ordinary (non-PHB) PE
 * @pe: EEH PE
 *
 * Translate the hardware/OPAL freeze state into the EEH core's state
 * mask. While a reset is in flight the PE is reported as fully
 * operational so the core keeps making progress.
 */
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result;

	/*
	 * We don't clobber hardware frozen state until PE
	 * reset is completed. In order to keep EEH core
	 * moving forward, we have to return operational
	 * state during PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch PE state from hardware. If the PHB
	 * supports compound PE, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state: map OPAL freeze codes onto EEH state bits */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If PHB supports compound PE, to freeze all
	 * slave PEs for consistency.
	 *
	 * If the PE is switching to frozen state for the
	 * first time, to dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE)  &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}
684
685 /**
686  * pnv_eeh_get_state - Retrieve PE state
687  * @pe: EEH PE
688  * @delay: delay while PE state is temporarily unavailable
689  *
 * Retrieve the state of the specified PE. For IODA-compatible
691  * platform, it should be retrieved from IODA table. Therefore,
692  * we prefer passing down to hardware implementation to handle
693  * it.
694  */
695 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
696 {
697         int ret;
698
699         if (pe->type & EEH_PE_PHB)
700                 ret = pnv_eeh_get_phb_state(pe);
701         else
702                 ret = pnv_eeh_get_pe_state(pe);
703
704         if (!delay)
705                 return ret;
706
707         /*
708          * If the PE state is temporarily unavailable,
709          * to inform the EEH core delay for default
710          * period (1 second)
711          */
712         *delay = 0;
713         if (ret & EEH_STATE_UNAVAILABLE)
714                 *delay = 1000;
715
716         return ret;
717 }
718
719 static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
720 {
721         s64 rc = OPAL_HARDWARE;
722
723         while (1) {
724                 rc = opal_pci_poll(phb->opal_id);
725                 if (rc <= 0)
726                         break;
727
728                 if (system_state < SYSTEM_RUNNING)
729                         udelay(1000 * rc);
730                 else
731                         msleep(rc);
732         }
733
734         return rc;
735 }
736
/*
 * pnv_eeh_phb_reset - Issue a complete reset on the given PHB
 * @hose: PCI controller to reset
 * @option: EEH_RESET_FUNDAMENTAL/HOT to assert, EEH_RESET_DEACTIVATE
 *          to deassert
 *
 * Returns 0 on success or -EIO on any OPAL failure (including an
 * unrecognized @option, which leaves rc at OPAL_HARDWARE).
 */
int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll state of the PHB until the request is done
	 * successfully. The PHB reset is usually PHB complete
	 * reset followed by hot reset on root bus. So we also
	 * need the PCI bus settlement delay.
	 */
	rc = pnv_eeh_phb_poll(phb);
	if (option == EEH_RESET_DEACTIVATE) {
		/* Busy-wait when the scheduler isn't running yet */
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
777
778 static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
779 {
780         struct pnv_phb *phb = hose->private_data;
781         s64 rc = OPAL_HARDWARE;
782
783         pr_debug("%s: Reset PHB#%x, option=%d\n",
784                  __func__, hose->global_number, option);
785
786         /*
787          * During the reset deassert time, we needn't care
788          * the reset scope because the firmware does nothing
789          * for fundamental or hot reset during deassert phase.
790          */
791         if (option == EEH_RESET_FUNDAMENTAL)
792                 rc = opal_pci_reset(phb->opal_id,
793                                     OPAL_RESET_PCI_FUNDAMENTAL,
794                                     OPAL_ASSERT_RESET);
795         else if (option == EEH_RESET_HOT)
796                 rc = opal_pci_reset(phb->opal_id,
797                                     OPAL_RESET_PCI_HOT,
798                                     OPAL_ASSERT_RESET);
799         else if (option == EEH_RESET_DEACTIVATE)
800                 rc = opal_pci_reset(phb->opal_id,
801                                     OPAL_RESET_PCI_HOT,
802                                     OPAL_DEASSERT_RESET);
803         if (rc < 0)
804                 goto out;
805
806         /* Poll state of the PHB until the request is done */
807         rc = pnv_eeh_phb_poll(phb);
808         if (option == EEH_RESET_DEACTIVATE)
809                 msleep(EEH_PE_RST_SETTLE_TIME);
810 out:
811         if (rc != OPAL_SUCCESS)
812                 return -EIO;
813
814         return 0;
815 }
816
/*
 * pnv_eeh_bridge_reset - Reset the secondary bus of a PCI bridge
 * @dev: the bridge device
 * @option: EEH_RESET_FUNDAMENTAL/HOT to assert the secondary bus
 *          reset, EEH_RESET_DEACTIVATE to release it
 *
 * Toggles PCI_BRIDGE_CTL_BUS_RESET via config space. While the reset
 * is asserted, the surprise-down error is masked in the bridge's AER
 * uncorrectable mask (when an AER capability is present) so the
 * expected link-down isn't reported as an error. Always returns 0.
 */
static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		/* Assert secondary bus reset and hold it */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		/* Release the reset and let the bus settle */
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}
867
868 void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
869 {
870         struct pci_controller *hose;
871
872         if (pci_is_root_bus(dev->bus)) {
873                 hose = pci_bus_to_host(dev->bus);
874                 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
875                 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
876         } else {
877                 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
878                 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
879         }
880 }
881
882 /**
883  * pnv_eeh_reset - Reset the specified PE
884  * @pe: EEH PE
885  * @option: reset option
886  *
887  * Do reset on the indicated PE. For PCI bus sensitive PE,
888  * we need to reset the parent p2p bridge. The PHB has to
889  * be reinitialized if the p2p bridge is root bridge. For
890  * PCI device sensitive PE, we will try to reset the device
891  * through FLR. For now, we don't have OPAL APIs to do HARD
892  * reset yet, so all reset would be SOFT (HOT) reset.
893  */
894 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
895 {
896         struct pci_controller *hose = pe->phb;
897         struct pci_bus *bus;
898         int ret;
899
900         /*
901          * For PHB reset, we always have complete reset. For those PEs whose
902          * primary bus derived from root complex (root bus) or root port
903          * (usually bus#1), we apply hot or fundamental reset on the root port.
904          * For other PEs, we always have hot reset on the PE primary bus.
905          *
906          * Here, we have different design to pHyp, which always clear the
907          * frozen state during PE reset. However, the good idea here from
908          * benh is to keep frozen state before we get PE reset done completely
909          * (until BAR restore). With the frozen state, HW drops illegal IO
910          * or MMIO access, which can incur recrusive frozen PE during PE
911          * reset. The side effect is that EEH core has to clear the frozen
912          * state explicitly after BAR restore.
913          */
914         if (pe->type & EEH_PE_PHB) {
915                 ret = pnv_eeh_phb_reset(hose, option);
916         } else {
917                 struct pnv_phb *phb;
918                 s64 rc;
919
920                 /*
921                  * The frozen PE might be caused by PAPR error injection
922                  * registers, which are expected to be cleared after hitting
923                  * frozen PE as stated in the hardware spec. Unfortunately,
924                  * that's not true on P7IOC. So we have to clear it manually
925                  * to avoid recursive EEH errors during recovery.
926                  */
927                 phb = hose->private_data;
928                 if (phb->model == PNV_PHB_MODEL_P7IOC &&
929                     (option == EEH_RESET_HOT ||
930                     option == EEH_RESET_FUNDAMENTAL)) {
931                         rc = opal_pci_reset(phb->opal_id,
932                                             OPAL_RESET_PHB_ERROR,
933                                             OPAL_ASSERT_RESET);
934                         if (rc != OPAL_SUCCESS) {
935                                 pr_warn("%s: Failure %lld clearing "
936                                         "error injection registers\n",
937                                         __func__, rc);
938                                 return -EIO;
939                         }
940                 }
941
942                 bus = eeh_pe_bus_get(pe);
943                 if (pci_is_root_bus(bus) ||
944                         pci_is_root_bus(bus->parent))
945                         ret = pnv_eeh_root_reset(hose, option);
946                 else
947                         ret = pnv_eeh_bridge_reset(bus->self, option);
948         }
949
950         return ret;
951 }
952
953 /**
954  * pnv_eeh_wait_state - Wait for PE state
955  * @pe: EEH PE
956  * @max_wait: maximal period in millisecond
957  *
958  * Wait for the state of associated PE. It might take some time
959  * to retrieve the PE's state.
960  */
961 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
962 {
963         int ret;
964         int mwait;
965
966         while (1) {
967                 ret = pnv_eeh_get_state(pe, &mwait);
968
969                 /*
970                  * If the PE's state is temporarily unavailable,
971                  * we have to wait for the specified time. Otherwise,
972                  * the PE's state will be returned immediately.
973                  */
974                 if (ret != EEH_STATE_UNAVAILABLE)
975                         return ret;
976
977                 if (max_wait <= 0) {
978                         pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
979                                 __func__, pe->addr, max_wait);
980                         return EEH_STATE_NOT_SUPPORT;
981                 }
982
983                 max_wait -= mwait;
984                 msleep(mwait);
985         }
986
987         return EEH_STATE_NOT_SUPPORT;
988 }
989
990 /**
991  * pnv_eeh_get_log - Retrieve error log
992  * @pe: EEH PE
993  * @severity: temporary or permanent error log
994  * @drv_log: driver log to be combined with retrieved error log
995  * @len: length of driver log
996  *
997  * Retrieve the temporary or permanent error from the PE.
998  */
999 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
1000                            char *drv_log, unsigned long len)
1001 {
1002         if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
1003                 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
1004
1005         return 0;
1006 }
1007
1008 /**
1009  * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
1010  * @pe: EEH PE
1011  *
1012  * The function will be called to reconfigure the bridges included
1013  * in the specified PE so that the mulfunctional PE would be recovered
1014  * again.
1015  */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	/* No platform-specific bridge reconfiguration needed; report success */
	return 0;
}
1020
1021 /**
1022  * pnv_pe_err_inject - Inject specified error to the indicated PE
1023  * @pe: the indicated PE
1024  * @type: error type
1025  * @func: specific error type
1026  * @addr: address
1027  * @mask: address mask
1028  *
1029  * The routine is called to inject specified error, which is
1030  * determined by @type and @func, to the indicated PE for
1031  * testing purpose.
1032  */
1033 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
1034                               unsigned long addr, unsigned long mask)
1035 {
1036         struct pci_controller *hose = pe->phb;
1037         struct pnv_phb *phb = hose->private_data;
1038         s64 rc;
1039
1040         if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1041             type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1042                 pr_warn("%s: Invalid error type %d\n",
1043                         __func__, type);
1044                 return -ERANGE;
1045         }
1046
1047         if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
1048             func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1049                 pr_warn("%s: Invalid error function %d\n",
1050                         __func__, func);
1051                 return -ERANGE;
1052         }
1053
1054         /* Firmware supports error injection ? */
1055         if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1056                 pr_warn("%s: Firmware doesn't support error injection\n",
1057                         __func__);
1058                 return -ENXIO;
1059         }
1060
1061         /* Do error injection */
1062         rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1063                                  type, func, addr, mask);
1064         if (rc != OPAL_SUCCESS) {
1065                 pr_warn("%s: Failure %lld injecting error "
1066                         "%d-%d to PHB#%x-PE#%x\n",
1067                         __func__, rc, type, func,
1068                         hose->global_number, pe->addr);
1069                 return -EIO;
1070         }
1071
1072         return 0;
1073 }
1074
1075 static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
1076 {
1077         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1078
1079         if (!edev || !edev->pe)
1080                 return false;
1081
1082         if (edev->pe->state & EEH_PE_CFG_BLOCKED)
1083                 return true;
1084
1085         return false;
1086 }
1087
1088 static int pnv_eeh_read_config(struct pci_dn *pdn,
1089                                int where, int size, u32 *val)
1090 {
1091         if (!pdn)
1092                 return PCIBIOS_DEVICE_NOT_FOUND;
1093
1094         if (pnv_eeh_cfg_blocked(pdn)) {
1095                 *val = 0xFFFFFFFF;
1096                 return PCIBIOS_SET_FAILED;
1097         }
1098
1099         return pnv_pci_cfg_read(pdn, where, size, val);
1100 }
1101
1102 static int pnv_eeh_write_config(struct pci_dn *pdn,
1103                                 int where, int size, u32 val)
1104 {
1105         if (!pdn)
1106                 return PCIBIOS_DEVICE_NOT_FOUND;
1107
1108         if (pnv_eeh_cfg_blocked(pdn))
1109                 return PCIBIOS_SET_FAILED;
1110
1111         return pnv_pci_cfg_write(pdn, where, size, val);
1112 }
1113
/*
 * Dump the P7IOC hub diag-data fields common to every diag type
 * (the GEM and LEM register groups). A group is printed only when
 * at least one register in it is non-zero.
 */
static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info("  GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info("  LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}
1136
/*
 * Fetch the P7IOC hub diag-data from OPAL into the per-PHB buffer
 * and pretty-print it according to the reported diag type (RGC, BI,
 * CI, MISC or I2C). Fetch failures are logged and otherwise ignored.
 */
static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
	long rc;

	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	/* Dump the type-specific registers after the common GEM/LEM set */
	switch (data->type) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
			pr_info("  RGC: %016llx %016llx\n",
				be64_to_cpu(data->rgc.rgcStatus),
				be64_to_cpu(data->rgc.rgcLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
		    data->bi.biLdcp2 || data->bi.biFenceStatus)
			pr_info("  BI:  %016llx %016llx %016llx %016llx\n",
				be64_to_cpu(data->bi.biLdcp0),
				be64_to_cpu(data->bi.biLdcp1),
				be64_to_cpu(data->bi.biLdcp2),
				be64_to_cpu(data->bi.biFenceStatus));
		break;
	case OPAL_P7IOC_DIAG_TYPE_CI:
		pr_info("P7IOC diag-data for CI Port %d\n\n",
			data->ci.ciPort);
		pnv_eeh_dump_hub_diag_common(data);
		if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
			pr_info("  CI:  %016llx %016llx\n",
				be64_to_cpu(data->ci.ciPortStatus),
				be64_to_cpu(data->ci.ciPortLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_MISC:
		pr_info("P7IOC diag-data for MISC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	case OPAL_P7IOC_DIAG_TYPE_I2C:
		pr_info("P7IOC diag-data for I2C\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	default:
		pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
			__func__, phb->hub_id, data->type);
	}
}
1193
/*
 * pnv_eeh_get_pe - Translate an OPAL-reported PE number into an EEH PE
 * @hose: PCI controller owning the PE
 * @pe_no: PE number reported by firmware
 * @pe: output, the (possibly ancestor) PE the caller should handle
 *
 * Resolves compound PEs (slave -> master) before the lookup, freezes
 * the matching PE, then walks up the PE tree freezing any parent that
 * is also in a non-active state so the top-most affected PE is
 * returned. Returns 0 on success, -EEXIST when no matching PE exists.
 */
static int pnv_eeh_get_pe(struct pci_controller *hose,
			  u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;
	struct eeh_dev edev;

	/*
	 * If PHB supports compound PE, to fetch
	 * the master PE because slave PE is invisible
	 * to EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/* Find the PE according to PE#; a stack dummy edev carries the key */
	memset(&edev, 0, sizeof(struct eeh_dev));
	edev.phb = hose;
	edev.pe_config_addr = pe_no;
	dev_pe = eeh_pe_get(&edev);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need poke until
	 * hitting the frozen PE on top level.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		int active_flags = (EEH_STATE_MMIO_ACTIVE |
				    EEH_STATE_DMA_ACTIVE);

		/* Skip parents whose MMIO and DMA are both still active */
		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || (ret & active_flags) == active_flags) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}
1256
1257 /**
1258  * pnv_eeh_next_error - Retrieve next EEH error to handle
1259  * @pe: Affected PE
1260  *
1261  * The function is expected to be called by EEH core while it gets
1262  * special EEH event (without binding PE). The function calls to
1263  * OPAL APIs for next error to handle. The informational error is
1264  * handled internally by platform. However, the dead IOC, dead PHB,
1265  * fenced PHB and frozen PE should be handled by EEH core eventually.
1266  */
1267 static int pnv_eeh_next_error(struct eeh_pe **pe)
1268 {
1269         struct pci_controller *hose;
1270         struct pnv_phb *phb;
1271         struct eeh_pe *phb_pe, *parent_pe;
1272         __be64 frozen_pe_no;
1273         __be16 err_type, severity;
1274         int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
1275         long rc;
1276         int state, ret = EEH_NEXT_ERR_NONE;
1277
1278         /*
1279          * While running here, it's safe to purge the event queue. The
1280          * event should still be masked.
1281          */
1282         eeh_remove_event(NULL, false);
1283
1284         list_for_each_entry(hose, &hose_list, list_node) {
1285                 /*
1286                  * If the subordinate PCI buses of the PHB has been
1287                  * removed or is exactly under error recovery, we
1288                  * needn't take care of it any more.
1289                  */
1290                 phb = hose->private_data;
1291                 phb_pe = eeh_phb_pe_get(hose);
1292                 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
1293                         continue;
1294
1295                 rc = opal_pci_next_error(phb->opal_id,
1296                                          &frozen_pe_no, &err_type, &severity);
1297                 if (rc != OPAL_SUCCESS) {
1298                         pr_devel("%s: Invalid return value on "
1299                                  "PHB#%x (0x%lx) from opal_pci_next_error",
1300                                  __func__, hose->global_number, rc);
1301                         continue;
1302                 }
1303
1304                 /* If the PHB doesn't have error, stop processing */
1305                 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
1306                     be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
1307                         pr_devel("%s: No error found on PHB#%x\n",
1308                                  __func__, hose->global_number);
1309                         continue;
1310                 }
1311
1312                 /*
1313                  * Processing the error. We're expecting the error with
1314                  * highest priority reported upon multiple errors on the
1315                  * specific PHB.
1316                  */
1317                 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1318                         __func__, be16_to_cpu(err_type),
1319                         be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
1320                         hose->global_number);
1321                 switch (be16_to_cpu(err_type)) {
1322                 case OPAL_EEH_IOC_ERROR:
1323                         if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1324                                 pr_err("EEH: dead IOC detected\n");
1325                                 ret = EEH_NEXT_ERR_DEAD_IOC;
1326                         } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1327                                 pr_info("EEH: IOC informative error "
1328                                         "detected\n");
1329                                 pnv_eeh_get_and_dump_hub_diag(hose);
1330                                 ret = EEH_NEXT_ERR_NONE;
1331                         }
1332
1333                         break;
1334                 case OPAL_EEH_PHB_ERROR:
1335                         if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1336                                 *pe = phb_pe;
1337                                 pr_err("EEH: dead PHB#%x detected, "
1338                                        "location: %s\n",
1339                                         hose->global_number,
1340                                         eeh_pe_loc_get(phb_pe));
1341                                 ret = EEH_NEXT_ERR_DEAD_PHB;
1342                         } else if (be16_to_cpu(severity) ==
1343                                    OPAL_EEH_SEV_PHB_FENCED) {
1344                                 *pe = phb_pe;
1345                                 pr_err("EEH: Fenced PHB#%x detected, "
1346                                        "location: %s\n",
1347                                         hose->global_number,
1348                                         eeh_pe_loc_get(phb_pe));
1349                                 ret = EEH_NEXT_ERR_FENCED_PHB;
1350                         } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1351                                 pr_info("EEH: PHB#%x informative error "
1352                                         "detected, location: %s\n",
1353                                         hose->global_number,
1354                                         eeh_pe_loc_get(phb_pe));
1355                                 pnv_eeh_get_phb_diag(phb_pe);
1356                                 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1357                                 ret = EEH_NEXT_ERR_NONE;
1358                         }
1359
1360                         break;
1361                 case OPAL_EEH_PE_ERROR:
1362                         /*
1363                          * If we can't find the corresponding PE, we
1364                          * just try to unfreeze.
1365                          */
1366                         if (pnv_eeh_get_pe(hose,
1367                                 be64_to_cpu(frozen_pe_no), pe)) {
1368                                 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1369                                         hose->global_number, be64_to_cpu(frozen_pe_no));
1370                                 pr_info("EEH: PHB location: %s\n",
1371                                         eeh_pe_loc_get(phb_pe));
1372
1373                                 /* Dump PHB diag-data */
1374                                 rc = opal_pci_get_phb_diag_data2(phb->opal_id,
1375                                         phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
1376                                 if (rc == OPAL_SUCCESS)
1377                                         pnv_pci_dump_phb_diag_data(hose,
1378                                                         phb->diag.blob);
1379
1380                                 /* Try best to clear it */
1381                                 opal_pci_eeh_freeze_clear(phb->opal_id,
1382                                         frozen_pe_no,
1383                                         OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1384                                 ret = EEH_NEXT_ERR_NONE;
1385                         } else if ((*pe)->state & EEH_PE_ISOLATED ||
1386                                    eeh_pe_passed(*pe)) {
1387                                 ret = EEH_NEXT_ERR_NONE;
1388                         } else {
1389                                 pr_err("EEH: Frozen PE#%x "
1390                                        "on PHB#%x detected\n",
1391                                        (*pe)->addr,
1392                                         (*pe)->phb->global_number);
1393                                 pr_err("EEH: PE location: %s, "
1394                                        "PHB location: %s\n",
1395                                        eeh_pe_loc_get(*pe),
1396                                        eeh_pe_loc_get(phb_pe));
1397                                 ret = EEH_NEXT_ERR_FROZEN_PE;
1398                         }
1399
1400                         break;
1401                 default:
1402                         pr_warn("%s: Unexpected error type %d\n",
1403                                 __func__, be16_to_cpu(err_type));
1404                 }
1405
1406                 /*
1407                  * EEH core will try recover from fenced PHB or
1408                  * frozen PE. In the time for frozen PE, EEH core
1409                  * enable IO path for that before collecting logs,
1410                  * but it ruins the site. So we have to dump the
1411                  * log in advance here.
1412                  */
1413                 if ((ret == EEH_NEXT_ERR_FROZEN_PE  ||
1414                     ret == EEH_NEXT_ERR_FENCED_PHB) &&
1415                     !((*pe)->state & EEH_PE_ISOLATED)) {
1416                         eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1417                         pnv_eeh_get_phb_diag(*pe);
1418
1419                         if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1420                                 pnv_pci_dump_phb_diag_data((*pe)->phb,
1421                                                            (*pe)->data);
1422                 }
1423
1424                 /*
1425                  * We probably have the frozen parent PE out there and
1426                  * we need have to handle frozen parent PE firstly.
1427                  */
1428                 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1429                         parent_pe = (*pe)->parent;
1430                         while (parent_pe) {
1431                                 /* Hit the ceiling ? */
1432                                 if (parent_pe->type & EEH_PE_PHB)
1433                                         break;
1434
1435                                 /* Frozen parent PE ? */
1436                                 state = eeh_ops->get_state(parent_pe, NULL);
1437                                 if (state > 0 &&
1438                                     (state & active_flags) != active_flags)
1439                                         *pe = parent_pe;
1440
1441                                 /* Next parent level */
1442                                 parent_pe = parent_pe->parent;
1443                         }
1444
1445                         /* We possibly migrate to another PE */
1446                         eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1447                 }
1448
1449                 /*
1450                  * If we have no errors on the specific PHB or only
1451                  * informative error there, we continue poking it.
1452                  * Otherwise, we need actions to be taken by upper
1453                  * layer.
1454                  */
1455                 if (ret > EEH_NEXT_ERR_INF)
1456                         break;
1457         }
1458
1459         /* Unmask the event */
1460         if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1461                 enable_irq(eeh_event_irq);
1462
1463         return ret;
1464 }
1465
1466 static int pnv_eeh_restore_config(struct pci_dn *pdn)
1467 {
1468         struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1469         struct pnv_phb *phb;
1470         s64 ret;
1471
1472         if (!edev)
1473                 return -EEXIST;
1474
1475         phb = edev->phb->private_data;
1476         ret = opal_pci_reinit(phb->opal_id,
1477                               OPAL_REINIT_PCI_DEV, edev->config_addr);
1478         if (ret) {
1479                 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
1480                         __func__, edev->config_addr, ret);
1481                 return -EIO;
1482         }
1483
1484         return 0;
1485 }
1486
/* Platform-dependent EEH callbacks registered with the EEH core */
static struct eeh_ops pnv_eeh_ops = {
	.name                   = "powernv",
	.init                   = pnv_eeh_init,
	.post_init              = pnv_eeh_post_init,
	.probe                  = pnv_eeh_probe,
	.set_option             = pnv_eeh_set_option,
	.get_pe_addr            = pnv_eeh_get_pe_addr,
	.get_state              = pnv_eeh_get_state,
	.reset                  = pnv_eeh_reset,
	.wait_state             = pnv_eeh_wait_state,
	.get_log                = pnv_eeh_get_log,
	.configure_bridge       = pnv_eeh_configure_bridge,
	.err_inject             = pnv_eeh_err_inject,
	.read_config            = pnv_eeh_read_config,
	.write_config           = pnv_eeh_write_config,
	.next_error             = pnv_eeh_next_error,
	.restore_config         = pnv_eeh_restore_config
};
1505
/*
 * Hook run when a PCI device is added to its bus. On powernv only
 * SR-IOV virtual functions need work here: their EEH setup is
 * deferred until the VF is fully added.
 */
void pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	/* Physical functions get their EEH setup elsewhere */
	if (!pdev->is_virtfn)
		return;

	/*
	 * The following operations will fail if VF's sysfs files
	 * aren't created or its resources aren't finalized.
	 */
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	eeh_sysfs_add_device(pdev);
}
1521
1522 /**
1523  * eeh_powernv_init - Register platform dependent EEH operations
1524  *
1525  * EEH initialization on powernv platform. This function should be
1526  * called before any EEH related functions.
1527  */
1528 static int __init eeh_powernv_init(void)
1529 {
1530         int ret = -EINVAL;
1531
1532         eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
1533         ret = eeh_ops_register(&pnv_eeh_ops);
1534         if (!ret)
1535                 pr_info("EEH: PowerNV platform initialized\n");
1536         else
1537                 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
1538
1539         return ret;
1540 }
1541 machine_early_initcall(powernv, eeh_powernv_init);