Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
[linux-2.6-microblaze.git] / drivers / edac / cpc925_edac.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
4  *
5  * Copyright (c) 2008 Wind River Systems, Inc.
6  *
7  * Authors:     Cao Qingtao <qingtao.cao@windriver.com>
8  */
9
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/io.h>
13 #include <linux/edac.h>
14 #include <linux/of.h>
15 #include <linux/platform_device.h>
16 #include <linux/gfp.h>
17
18 #include "edac_module.h"
19
20 #define CPC925_EDAC_REVISION    " Ver: 1.0.0"
21 #define CPC925_EDAC_MOD_STR     "cpc925_edac"
22
23 #define cpc925_printk(level, fmt, arg...) \
24         edac_printk(level, "CPC925", fmt, ##arg)
25
26 #define cpc925_mc_printk(mci, level, fmt, arg...) \
27         edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
28
29 /*
30  * CPC925 registers are of 32 bits with bit0 defined at the
31  * most significant bit and bit31 at that of least significant.
32  */
#define CPC925_BITS_PER_REG     32
/*
 * IBM bit numbering: bit0 is the MSB of a 32-bit register.
 * (nr) is parenthesized so expression arguments expand correctly.
 */
#define CPC925_BIT(nr)          (1UL << (CPC925_BITS_PER_REG - 1 - (nr)))
35
36 /*
37  * EDAC device names for the error detections of
38  * CPU Interface and Hypertransport Link.
39  */
40 #define CPC925_CPU_ERR_DEV      "cpu"
41 #define CPC925_HT_LINK_DEV      "htlink"
42
43 /* Suppose DDR Refresh cycle is 15.6 microsecond */
44 #define CPC925_REF_FREQ         0xFA69
45 #define CPC925_SCRUB_BLOCK_SIZE 64      /* bytes */
46 #define CPC925_NR_CSROWS        8
47
48 /*
49  * All registers and bits definitions are taken from
50  * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
51  */
52
53 /*
54  * CPU and Memory Controller Registers
55  */
56 /************************************************************
57  *      Processor Interface Exception Mask Register (APIMASK)
58  ************************************************************/
59 #define REG_APIMASK_OFFSET      0x30070
60 enum apimask_bits {
61         APIMASK_DART    = CPC925_BIT(0), /* DART Exception */
62         APIMASK_ADI0    = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
63         APIMASK_ADI1    = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
64         APIMASK_STAT    = CPC925_BIT(3), /* Status Exception */
65         APIMASK_DERR    = CPC925_BIT(4), /* Data Error Exception */
66         APIMASK_ADRS0   = CPC925_BIT(5), /* Addressing Exception on PI0 */
67         APIMASK_ADRS1   = CPC925_BIT(6), /* Addressing Exception on PI1 */
68                                          /* BIT(7) Reserved */
69         APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
70         APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
71         APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
72         APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
73
74         CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
75                            APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
76                            APIMASK_ADRS1),
77         ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
78                            APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
79 };
80 #define APIMASK_ADI(n)          CPC925_BIT(((n)+1))
81
82 /************************************************************
83  *      Processor Interface Exception Register (APIEXCP)
84  ************************************************************/
85 #define REG_APIEXCP_OFFSET      0x30060
86 enum apiexcp_bits {
87         APIEXCP_DART    = CPC925_BIT(0), /* DART Exception */
88         APIEXCP_ADI0    = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
89         APIEXCP_ADI1    = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
90         APIEXCP_STAT    = CPC925_BIT(3), /* Status Exception */
91         APIEXCP_DERR    = CPC925_BIT(4), /* Data Error Exception */
92         APIEXCP_ADRS0   = CPC925_BIT(5), /* Addressing Exception on PI0 */
93         APIEXCP_ADRS1   = CPC925_BIT(6), /* Addressing Exception on PI1 */
94                                          /* BIT(7) Reserved */
95         APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
96         APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
97         APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
98         APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
99
100         CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
101                              APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
102                              APIEXCP_ADRS1),
103         UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
104         CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
105         ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
106 };
107
108 /************************************************************
109  *      Memory Bus Configuration Register (MBCR)
110 ************************************************************/
111 #define REG_MBCR_OFFSET         0x2190
112 #define MBCR_64BITCFG_SHIFT     23
113 #define MBCR_64BITCFG_MASK      (1UL << MBCR_64BITCFG_SHIFT)
114 #define MBCR_64BITBUS_SHIFT     22
115 #define MBCR_64BITBUS_MASK      (1UL << MBCR_64BITBUS_SHIFT)
116
117 /************************************************************
118  *      Memory Bank Mode Register (MBMR)
119 ************************************************************/
120 #define REG_MBMR_OFFSET         0x21C0
121 #define MBMR_MODE_MAX_VALUE     0xF
122 #define MBMR_MODE_SHIFT         25
123 #define MBMR_MODE_MASK          (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
124 #define MBMR_BBA_SHIFT          24
125 #define MBMR_BBA_MASK           (1UL << MBMR_BBA_SHIFT)
126
127 /************************************************************
128  *      Memory Bank Boundary Address Register (MBBAR)
129  ************************************************************/
#define REG_MBBAR_OFFSET        0x21D0
/* UL suffix: 0xFF << 24 would overflow a signed int (undefined behavior) */
#define MBBAR_BBA_MAX_VALUE     0xFFUL
#define MBBAR_BBA_SHIFT         24
#define MBBAR_BBA_MASK          (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
134
135 /************************************************************
136  *      Memory Scrub Control Register (MSCR)
137  ************************************************************/
138 #define REG_MSCR_OFFSET         0x2400
139 #define MSCR_SCRUB_MOD_MASK     0xC0000000 /* scrub_mod - bit0:1*/
140 #define MSCR_BACKGR_SCRUB       0x40000000 /* 01 */
141 #define MSCR_SI_SHIFT           16      /* si - bit8:15*/
142 #define MSCR_SI_MAX_VALUE       0xFF
143 #define MSCR_SI_MASK            (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
144
145 /************************************************************
146  *      Memory Scrub Range Start Register (MSRSR)
147  ************************************************************/
148 #define REG_MSRSR_OFFSET        0x2410
149
150 /************************************************************
151  *      Memory Scrub Range End Register (MSRER)
152  ************************************************************/
153 #define REG_MSRER_OFFSET        0x2420
154
155 /************************************************************
156  *      Memory Scrub Pattern Register (MSPR)
157  ************************************************************/
158 #define REG_MSPR_OFFSET         0x2430
159
160 /************************************************************
161  *      Memory Check Control Register (MCCR)
162  ************************************************************/
163 #define REG_MCCR_OFFSET         0x2440
164 enum mccr_bits {
165         MCCR_ECC_EN     = CPC925_BIT(0), /* ECC high and low check */
166 };
167
168 /************************************************************
169  *      Memory Check Range End Register (MCRER)
170  ************************************************************/
171 #define REG_MCRER_OFFSET        0x2450
172
173 /************************************************************
174  *      Memory Error Address Register (MEAR)
175  ************************************************************/
#define REG_MEAR_OFFSET         0x2460
/* UL suffix: 0x3 << 30 would overflow a signed int (undefined behavior) */
#define MEAR_BCNT_MAX_VALUE     0x3UL
#define MEAR_BCNT_SHIFT         30
#define MEAR_BCNT_MASK          (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
#define MEAR_RANK_MAX_VALUE     0x7
#define MEAR_RANK_SHIFT         27
#define MEAR_RANK_MASK          (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
#define MEAR_COL_MAX_VALUE      0x7FF
#define MEAR_COL_SHIFT          16
#define MEAR_COL_MASK           (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
#define MEAR_BANK_MAX_VALUE     0x3
#define MEAR_BANK_SHIFT         14
#define MEAR_BANK_MASK          (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
#define MEAR_ROW_MASK           0x00003FFF
190
191 /************************************************************
192  *      Memory Error Syndrome Register (MESR)
193  ************************************************************/
194 #define REG_MESR_OFFSET         0x2470
195 #define MESR_ECC_SYN_H_MASK     0xFF00
196 #define MESR_ECC_SYN_L_MASK     0x00FF
197
198 /************************************************************
199  *      Memory Mode Control Register (MMCR)
200  ************************************************************/
201 #define REG_MMCR_OFFSET         0x2500
202 enum mmcr_bits {
203         MMCR_REG_DIMM_MODE = CPC925_BIT(3),
204 };
205
206 /*
207  * HyperTransport Link Registers
208  */
209 /************************************************************
210  *  Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
211  ************************************************************/
212 #define REG_ERRCTRL_OFFSET      0x70140
213 enum errctrl_bits {                      /* nonfatal interrupts for */
214         ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
215         ERRCTRL_CRC_NF  = CPC925_BIT(1), /* CRC error */
216         ERRCTRL_RSP_NF  = CPC925_BIT(2), /* Response error */
217         ERRCTRL_EOC_NF  = CPC925_BIT(3), /* End-Of-Chain error */
218         ERRCTRL_OVF_NF  = CPC925_BIT(4), /* Overflow error */
219         ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
220
221         ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
222         ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
223
224         HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
225                              ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
226                              ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
227         HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
228 };
229
230 /************************************************************
231  *  Link Configuration and Link Control Register (LINKCTRL)
232  ************************************************************/
233 #define REG_LINKCTRL_OFFSET     0x70110
234 enum linkctrl_bits {
235         LINKCTRL_CRC_ERR        = (CPC925_BIT(22) | CPC925_BIT(23)),
236         LINKCTRL_LINK_FAIL      = CPC925_BIT(27),
237
238         HT_LINKCTRL_DETECTED    = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
239 };
240
241 /************************************************************
242  *  Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
243  ************************************************************/
244 #define REG_LINKERR_OFFSET      0x70120
245 enum linkerr_bits {
246         LINKERR_EOC_ERR         = CPC925_BIT(17), /* End-Of-Chain error */
247         LINKERR_OVF_ERR         = CPC925_BIT(18), /* Receive Buffer Overflow */
248         LINKERR_PROT_ERR        = CPC925_BIT(19), /* Protocol error */
249
250         HT_LINKERR_DETECTED     = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
251                                    LINKERR_PROT_ERR),
252 };
253
254 /************************************************************
255  *      Bridge Control Register (BRGCTRL)
256  ************************************************************/
257 #define REG_BRGCTRL_OFFSET      0x70300
258 enum brgctrl_bits {
259         BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
260         BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
261 };
262
/* Private structure for edac memory controller */
struct cpc925_mc_pdata {
	void __iomem *vbase;		/* mapped CPC925 register window */
	unsigned long total_mem;	/* total RAM size, summed from DTB "memory" nodes */
	const char *name;
	int edac_idx;			/* EDAC memory-controller index */
};
270
/* Private structure for common edac device (CPU interface / HT link) */
struct cpc925_dev_info {
	void __iomem *vbase;		/* register window, shared with the MC */
	struct platform_device *pdev;	/* platform device registered for this node */
	char *ctl_name;			/* EDAC device name ("cpu" or "htlink") */
	int edac_idx;			/* index from edac_device_alloc_index() */
	struct edac_device_ctl_info *edac_dev;
	void (*init)(struct cpc925_dev_info *dev_info);	/* enable error detection */
	void (*exit)(struct cpc925_dev_info *dev_info);	/* disable error detection */
	void (*check)(struct edac_device_ctl_info *edac_dev); /* poll for errors */
};
282
283 /* Get total memory size from Open Firmware DTB */
284 static void get_total_mem(struct cpc925_mc_pdata *pdata)
285 {
286         struct device_node *np = NULL;
287         const unsigned int *reg, *reg_end;
288         int len, sw, aw;
289         unsigned long start, size;
290
291         np = of_find_node_by_type(NULL, "memory");
292         if (!np)
293                 return;
294
295         aw = of_n_addr_cells(np);
296         sw = of_n_size_cells(np);
297         reg = (const unsigned int *)of_get_property(np, "reg", &len);
298         reg_end = reg + len/4;
299
300         pdata->total_mem = 0;
301         do {
302                 start = of_read_number(reg, aw);
303                 reg += aw;
304                 size = of_read_number(reg, sw);
305                 reg += sw;
306                 edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
307                 pdata->total_mem += size;
308         } while (reg < reg_end);
309
310         of_node_put(np);
311         edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
312 }
313
314 static void cpc925_init_csrows(struct mem_ctl_info *mci)
315 {
316         struct cpc925_mc_pdata *pdata = mci->pvt_info;
317         struct csrow_info *csrow;
318         struct dimm_info *dimm;
319         enum dev_type dtype;
320         int index, j;
321         u32 mbmr, mbbar, bba, grain;
322         unsigned long row_size, nr_pages, last_nr_pages = 0;
323
324         get_total_mem(pdata);
325
326         for (index = 0; index < mci->nr_csrows; index++) {
327                 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
328                                    0x20 * index);
329                 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
330                                    0x20 + index);
331                 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
332                        ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
333
334                 if (bba == 0)
335                         continue; /* not populated */
336
337                 csrow = mci->csrows[index];
338
339                 row_size = bba * (1UL << 28);   /* 256M */
340                 csrow->first_page = last_nr_pages;
341                 nr_pages = row_size >> PAGE_SHIFT;
342                 csrow->last_page = csrow->first_page + nr_pages - 1;
343                 last_nr_pages = csrow->last_page + 1;
344
345                 switch (csrow->nr_channels) {
346                 case 1: /* Single channel */
347                         grain = 32; /* four-beat burst of 32 bytes */
348                         break;
349                 case 2: /* Dual channel */
350                 default:
351                         grain = 64; /* four-beat burst of 64 bytes */
352                         break;
353                 }
354                 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
355                 case 6: /* 0110, no way to differentiate X8 VS X16 */
356                 case 5: /* 0101 */
357                 case 8: /* 1000 */
358                         dtype = DEV_X16;
359                         break;
360                 case 7: /* 0111 */
361                 case 9: /* 1001 */
362                         dtype = DEV_X8;
363                         break;
364                 default:
365                         dtype = DEV_UNKNOWN;
366                 break;
367                 }
368                 for (j = 0; j < csrow->nr_channels; j++) {
369                         dimm = csrow->channels[j]->dimm;
370                         dimm->nr_pages = nr_pages / csrow->nr_channels;
371                         dimm->mtype = MEM_RDDR;
372                         dimm->edac_mode = EDAC_SECDED;
373                         dimm->grain = grain;
374                         dimm->dtype = dtype;
375                 }
376         }
377 }
378
379 /* Enable memory controller ECC detection */
380 static void cpc925_mc_init(struct mem_ctl_info *mci)
381 {
382         struct cpc925_mc_pdata *pdata = mci->pvt_info;
383         u32 apimask;
384         u32 mccr;
385
386         /* Enable various ECC error exceptions */
387         apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
388         if ((apimask & ECC_MASK_ENABLE) == 0) {
389                 apimask |= ECC_MASK_ENABLE;
390                 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
391         }
392
393         /* Enable ECC detection */
394         mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
395         if ((mccr & MCCR_ECC_EN) == 0) {
396                 mccr |= MCCR_ECC_EN;
397                 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
398         }
399 }
400
/* Disable memory controller ECC detection */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
	/*
	 * Intentionally a no-op.  Clearing the ECC detection bits here
	 * would work, but re-enabling them in cpc925_mc_init() on a later
	 * module reload triggers a machine check exception.  Leaving them
	 * enabled is safe: they are reset to 1 by default or by the boot
	 * loader.
	 */
}
417
418 /*
419  * Revert DDR column/row/bank addresses into page frame number and
420  * offset in page.
421  *
422  * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs),
423  * physical address(PA) bits to column address(CA) bits mappings are:
424  * CA   0   1   2   3   4   5   6   7   8   9   10
425  * PA   59  58  57  56  55  54  53  52  51  50  49
426  *
427  * physical address(PA) bits to bank address(BA) bits mappings are:
428  * BA   0   1
429  * PA   43  44
430  *
431  * physical address(PA) bits to row address(RA) bits mappings are:
432  * RA   0   1   2   3   4   5   6   7   8   9   10   11   12
433  * PA   36  35  34  48  47  46  45  40  41  42  39   38   37
434  */
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
		unsigned long *pfn, unsigned long *offset, int *csrow)
{
	u32 bcnt, rank, col, bank, row;
	u32 c;
	unsigned long pa;
	int i;

	/* Split the MEAR register into its beat-count/rank/col/bank/row fields */
	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	row = mear & MEAR_ROW_MASK;

	*csrow = rank;

#ifdef CONFIG_EDAC_DEBUG
	/*
	 * NOTE(review): on this early return *pfn and *offset are left
	 * untouched; the caller (cpc925_mc_check) pre-initializes both
	 * to 0, so they stay 0 in that case.
	 */
	if (mci->csrows[rank]->first_page == 0) {
		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
			"non-populated csrow, broken hardware?\n");
		return;
	}
#endif

	/* Revert csrow number: start from the row's base physical address */
	pa = mci->csrows[rank]->first_page << PAGE_SHIFT;

	/*
	 * Revert column address: fold the burst beat count into the column,
	 * then scatter the column bits into PA bits 14..4 (CA0 is the MSB
	 * per the mapping table above, hence the descending shift).
	 */
	col += bcnt;
	for (i = 0; i < 11; i++) {
		c = col & 0x1;
		col >>= 1;
		pa |= c << (14 - i);
	}

	/* Revert bank address: BA occupies PA bits 20:19 */
	pa |= bank << 19;

	/*
	 * Revert row address, in 4 steps: the row bits are not contiguous
	 * in the physical address (see the RA/PA table above), so they are
	 * placed in four separate runs.  The order of these loops matters:
	 * each one consumes the next low-order bits of "row".
	 */
	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (26 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (21 + i);
	}

	for (i = 0; i < 4; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (18 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (29 - i);
	}

	/* Split the reconstructed physical address into pfn + page offset */
	*offset = pa & (PAGE_SIZE - 1);
	*pfn = pa >> PAGE_SHIFT;

	edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
503
504 static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
505 {
506         if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
507                 return 0;
508
509         if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
510                 return 1;
511
512         cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
513                          syndrome);
514         return 1;
515 }
516
517 /* Check memory controller registers for ECC errors */
518 static void cpc925_mc_check(struct mem_ctl_info *mci)
519 {
520         struct cpc925_mc_pdata *pdata = mci->pvt_info;
521         u32 apiexcp;
522         u32 mear;
523         u32 mesr;
524         u16 syndrome;
525         unsigned long pfn = 0, offset = 0;
526         int csrow = 0, channel = 0;
527
528         /* APIEXCP is cleared when read */
529         apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
530         if ((apiexcp & ECC_EXCP_DETECTED) == 0)
531                 return;
532
533         mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
534         syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
535
536         mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
537
538         /* Revert column/row addresses into page frame number, etc */
539         cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
540
541         if (apiexcp & CECC_EXCP_DETECTED) {
542                 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
543                 channel = cpc925_mc_find_channel(mci, syndrome);
544                 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
545                                      pfn, offset, syndrome,
546                                      csrow, channel, -1,
547                                      mci->ctl_name, "");
548         }
549
550         if (apiexcp & UECC_EXCP_DETECTED) {
551                 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
552                 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
553                                      pfn, offset, 0,
554                                      csrow, -1, -1,
555                                      mci->ctl_name, "");
556         }
557
558         cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
559         cpc925_mc_printk(mci, KERN_INFO, "APIMASK               0x%08x\n",
560                 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
561         cpc925_mc_printk(mci, KERN_INFO, "APIEXCP               0x%08x\n",
562                 apiexcp);
563         cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl        0x%08x\n",
564                 __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
565         cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start   0x%08x\n",
566                 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
567         cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End     0x%08x\n",
568                 __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
569         cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern     0x%08x\n",
570                 __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
571         cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl          0x%08x\n",
572                 __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
573         cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End       0x%08x\n",
574                 __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
575         cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address       0x%08x\n",
576                 mesr);
577         cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome      0x%08x\n",
578                 syndrome);
579 }
580
581 /******************** CPU err device********************************/
/*
 * Build the set of APIMASK_ADI bits belonging to processor interfaces
 * with no CPU present, so callers can keep those handshake-error
 * sources masked.  The result is cached in a function-local static
 * after the first call.
 */
static u32 cpc925_cpu_mask_disabled(void)
{
	struct device_node *cpunode;
	static u32 mask = 0;

	/* use cached value if available */
	if (mask != 0)
		return mask;

	/* Start with both PIs assumed absent, then clear each one found */
	mask = APIMASK_ADI0 | APIMASK_ADI1;

	for_each_of_cpu_node(cpunode) {
		const u32 *reg = of_get_property(cpunode, "reg", NULL);
		/*
		 * NOTE(review): only reg values 0 and 1 map to a PI
		 * (APIMASK_ADI0/ADI1).  "> 2" also admits *reg == 2;
		 * APIMASK_ADI(2) is APIMASK_STAT, which mask never
		 * contains so the clear below is a no-op, but the bound
		 * looks off-by-one -- confirm whether "> 1" was meant.
		 */
		if (reg == NULL || *reg > 2) {
			cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
			continue;
		}

		mask &= ~APIMASK_ADI(*reg);
	}

	if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
		/* We assume that each CPU sits on it's own PI and that
		 * for present CPUs the reg property equals to the PI
		 * interface id */
		cpc925_printk(KERN_WARNING,
				"Assuming PI id is equal to CPU MPIC id!\n");
	}

	return mask;
}
613
614 /* Enable CPU Errors detection */
615 static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
616 {
617         u32 apimask;
618         u32 cpumask;
619
620         apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
621
622         cpumask = cpc925_cpu_mask_disabled();
623         if (apimask & cpumask) {
624                 cpc925_printk(KERN_WARNING, "CPU(s) not present, "
625                                 "but enabled in APIMASK, disabling\n");
626                 apimask &= ~cpumask;
627         }
628
629         if ((apimask & CPU_MASK_ENABLE) == 0)
630                 apimask |= CPU_MASK_ENABLE;
631
632         __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
633 }
634
/* Disable CPU Errors detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
	/*
	 * Intentionally a no-op.  Clearing the CPU error detection bits
	 * would be straightforward, but setting them again in
	 * cpc925_cpu_init() after a module reload triggers a machine
	 * check exception.  Leaving them enabled is fine: they default
	 * to 1 out of reset.
	 */
}
651
652 /* Check for CPU Errors */
653 static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
654 {
655         struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
656         u32 apiexcp;
657         u32 apimask;
658
659         /* APIEXCP is cleared when read */
660         apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
661         if ((apiexcp & CPU_EXCP_DETECTED) == 0)
662                 return;
663
664         if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
665                 return;
666
667         apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
668         cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
669                                  "Processor Interface register dump:\n");
670         cpc925_printk(KERN_INFO, "APIMASK               0x%08x\n", apimask);
671         cpc925_printk(KERN_INFO, "APIEXCP               0x%08x\n", apiexcp);
672
673         edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
674 }
675
676 /******************** HT Link err device****************************/
677 /* Enable HyperTransport Link Error detection */
678 static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
679 {
680         u32 ht_errctrl;
681
682         ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
683         if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
684                 ht_errctrl |= HT_ERRCTRL_ENABLE;
685                 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
686         }
687 }
688
689 /* Disable HyperTransport Link Error detection */
690 static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
691 {
692         u32 ht_errctrl;
693
694         ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
695         ht_errctrl &= ~HT_ERRCTRL_ENABLE;
696         __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
697 }
698
/*
 * Check for HyperTransport Link errors: poll the bridge/link/error
 * registers, dump and acknowledge anything found, then report through
 * the EDAC device.
 */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);

	/* Nothing to do unless at least one error source is flagged */
	if (!((brgctrl & BRGCTRL_DETSERR) ||
	      (linkctrl & HT_LINKCTRL_DETECTED) ||
	      (errctrl & HT_ERRCTRL_DETECTED) ||
	      (linkerr & HT_LINKERR_DETECTED)))
		return;

	cpc925_printk(KERN_INFO, "HT Link Fault\n"
				 "HT register dump:\n");
	cpc925_printk(KERN_INFO, "Bridge Ctrl                   0x%08x\n",
		      brgctrl);
	cpc925_printk(KERN_INFO, "Link Config Ctrl              0x%08x\n",
		      linkctrl);
	cpc925_printk(KERN_INFO, "Error Enum and Ctrl           0x%08x\n",
		      errctrl);
	cpc925_printk(KERN_INFO, "Link Error                    0x%08x\n",
		      linkerr);

	/* Clear by write 1 */
	if (brgctrl & BRGCTRL_DETSERR)
		__raw_writel(BRGCTRL_DETSERR,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (linkctrl & HT_LINKCTRL_DETECTED)
		__raw_writel(HT_LINKCTRL_DETECTED,
				dev_info->vbase + REG_LINKCTRL_OFFSET);

	/* Initiate Secondary Bus Reset to clear the chain failure */
	if (errctrl & ERRCTRL_CHN_FAL)
		__raw_writel(BRGCTRL_SECBUSRESET,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (errctrl & ERRCTRL_RSP_ERR)
		__raw_writel(ERRCTRL_RSP_ERR,
				dev_info->vbase + REG_ERRCTRL_OFFSET);

	if (linkerr & HT_LINKERR_DETECTED)
		__raw_writel(HT_LINKERR_DETECTED,
				dev_info->vbase + REG_LINKERR_OFFSET);

	/*
	 * NOTE(review): all HT link faults are reported as correctable
	 * (handle_ce) -- confirm that is intentional, since some of the
	 * sources above (e.g. sync flooding) look fatal.
	 */
	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
749
/*
 * The two companion EDAC devices (CPU interface and HT link) sharing
 * the memory controller's MMIO window; the empty terminating entry
 * (NULL .init) ends iteration in cpc925_add_edac_devices().
 */
static struct cpc925_dev_info cpc925_devs[] = {
	{
	.ctl_name = CPC925_CPU_ERR_DEV,
	.init = cpc925_cpu_init,
	.exit = cpc925_cpu_exit,
	.check = cpc925_cpu_check,
	},
	{
	.ctl_name = CPC925_HT_LINK_DEV,
	.init = cpc925_htlink_init,
	.exit = cpc925_htlink_exit,
	.check = cpc925_htlink_check,
	},
	{ }
};
765
766 /*
767  * Add CPU Err detection and HyperTransport Link Err detection
768  * as common "edac_device", they have no corresponding device
769  * nodes in the Open Firmware DTB and we have to add platform
770  * devices for them. Also, they will share the MMIO with that
771  * of memory controller.
772  */
static void cpc925_add_edac_devices(void __iomem *vbase)
{
	struct cpc925_dev_info *dev_info;

	/* The shared MMIO mapping must already exist (set up by probe). */
	if (!vbase) {
		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
		return;
	}

	/*
	 * Walk the sentinel-terminated cpc925_devs[] table.  Each entry is
	 * registered independently; a failure for one entry is cleaned up
	 * inside the loop (err1/err2 labels) and does not stop the others.
	 */
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		dev_info->vbase = vbase;
		dev_info->pdev = platform_device_register_simple(
					dev_info->ctl_name, 0, NULL, 0);
		if (IS_ERR(dev_info->pdev)) {
			cpc925_printk(KERN_ERR,
				"Can't register platform device for %s\n",
				dev_info->ctl_name);
			continue;
		}

		/*
		 * Don't have to allocate private structure but
		 * make use of cpc925_devs[] instead.
		 */
		dev_info->edac_idx = edac_device_alloc_index();
		dev_info->edac_dev =
			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
		if (!dev_info->edac_dev) {
			cpc925_printk(KERN_ERR, "No memory for edac device\n");
			goto err1;
		}

		dev_info->edac_dev->pvt_info = dev_info;
		dev_info->edac_dev->dev = &dev_info->pdev->dev;
		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);

		/* Driver only supports polling; hook the per-device checker. */
		if (edac_op_state == EDAC_OPSTATE_POLL)
			dev_info->edac_dev->edac_check = dev_info->check;

		/*
		 * NOTE(review): the loop condition already guarantees
		 * dev_info->init is non-NULL; this guard is redundant but
		 * harmless.
		 */
		if (dev_info->init)
			dev_info->init(dev_info);

		/* edac_device_add_device() returns non-zero on failure. */
		if (edac_device_add_device(dev_info->edac_dev) > 0) {
			cpc925_printk(KERN_ERR,
				"Unable to add edac device for %s\n",
				dev_info->ctl_name);
			goto err2;
		}

		edac_dbg(0, "Successfully added edac device for %s\n",
			 dev_info->ctl_name);

		continue;

		/* Per-iteration unwind: undo init, free ctl, drop pdev. */
err2:
		if (dev_info->exit)
			dev_info->exit(dev_info);
		edac_device_free_ctl_info(dev_info->edac_dev);
err1:
		platform_device_unregister(dev_info->pdev);
	}
}
838
839 /*
840  * Delete the common "edac_device" for CPU Err Detection
841  * and HyperTransport Link Err Detection
842  */
843 static void cpc925_del_edac_devices(void)
844 {
845         struct cpc925_dev_info *dev_info;
846
847         for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
848                 if (dev_info->edac_dev) {
849                         edac_device_del_device(dev_info->edac_dev->dev);
850                         edac_device_free_ctl_info(dev_info->edac_dev);
851                         platform_device_unregister(dev_info->pdev);
852                 }
853
854                 if (dev_info->exit)
855                         dev_info->exit(dev_info);
856
857                 edac_dbg(0, "Successfully deleted edac device for %s\n",
858                          dev_info->ctl_name);
859         }
860 }
861
862 /* Convert current back-ground scrub rate into byte/sec bandwidth */
863 static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
864 {
865         struct cpc925_mc_pdata *pdata = mci->pvt_info;
866         int bw;
867         u32 mscr;
868         u8 si;
869
870         mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
871         si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
872
873         edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
874
875         if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
876             (si == 0)) {
877                 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
878                 bw = 0;
879         } else
880                 bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
881
882         return bw;
883 }
884
885 /* Return 0 for single channel; 1 for dual channel */
886 static int cpc925_mc_get_channels(void __iomem *vbase)
887 {
888         int dual = 0;
889         u32 mbcr;
890
891         mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
892
893         /*
894          * Dual channel only when 128-bit wide physical bus
895          * and 128-bit configuration.
896          */
897         if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
898             ((mbcr & MBCR_64BITBUS_MASK) == 0))
899                 dual = 1;
900
901         edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
902
903         return dual;
904 }
905
906 static int cpc925_probe(struct platform_device *pdev)
907 {
908         static int edac_mc_idx;
909         struct mem_ctl_info *mci;
910         struct edac_mc_layer layers[2];
911         void __iomem *vbase;
912         struct cpc925_mc_pdata *pdata;
913         struct resource *r;
914         int res = 0, nr_channels;
915
916         edac_dbg(0, "%s platform device found!\n", pdev->name);
917
918         if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
919                 res = -ENOMEM;
920                 goto out;
921         }
922
923         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924         if (!r) {
925                 cpc925_printk(KERN_ERR, "Unable to get resource\n");
926                 res = -ENOENT;
927                 goto err1;
928         }
929
930         if (!devm_request_mem_region(&pdev->dev,
931                                      r->start,
932                                      resource_size(r),
933                                      pdev->name)) {
934                 cpc925_printk(KERN_ERR, "Unable to request mem region\n");
935                 res = -EBUSY;
936                 goto err1;
937         }
938
939         vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
940         if (!vbase) {
941                 cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
942                 res = -ENOMEM;
943                 goto err2;
944         }
945
946         nr_channels = cpc925_mc_get_channels(vbase) + 1;
947
948         layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
949         layers[0].size = CPC925_NR_CSROWS;
950         layers[0].is_virt_csrow = true;
951         layers[1].type = EDAC_MC_LAYER_CHANNEL;
952         layers[1].size = nr_channels;
953         layers[1].is_virt_csrow = false;
954         mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
955                             sizeof(struct cpc925_mc_pdata));
956         if (!mci) {
957                 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
958                 res = -ENOMEM;
959                 goto err2;
960         }
961
962         pdata = mci->pvt_info;
963         pdata->vbase = vbase;
964         pdata->edac_idx = edac_mc_idx++;
965         pdata->name = pdev->name;
966
967         mci->pdev = &pdev->dev;
968         platform_set_drvdata(pdev, mci);
969         mci->dev_name = dev_name(&pdev->dev);
970         mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
971         mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
972         mci->edac_cap = EDAC_FLAG_SECDED;
973         mci->mod_name = CPC925_EDAC_MOD_STR;
974         mci->ctl_name = pdev->name;
975
976         if (edac_op_state == EDAC_OPSTATE_POLL)
977                 mci->edac_check = cpc925_mc_check;
978
979         mci->ctl_page_to_phys = NULL;
980         mci->scrub_mode = SCRUB_SW_SRC;
981         mci->set_sdram_scrub_rate = NULL;
982         mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
983
984         cpc925_init_csrows(mci);
985
986         /* Setup memory controller registers */
987         cpc925_mc_init(mci);
988
989         if (edac_mc_add_mc(mci) > 0) {
990                 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
991                 goto err3;
992         }
993
994         cpc925_add_edac_devices(vbase);
995
996         /* get this far and it's successful */
997         edac_dbg(0, "success\n");
998
999         res = 0;
1000         goto out;
1001
1002 err3:
1003         cpc925_mc_exit(mci);
1004         edac_mc_free(mci);
1005 err2:
1006         devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
1007 err1:
1008         devres_release_group(&pdev->dev, cpc925_probe);
1009 out:
1010         return res;
1011 }
1012
static int cpc925_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	/*
	 * Delete common edac devices before edac mc, because
	 * the former share the MMIO of the latter.
	 */
	cpc925_del_edac_devices();
	cpc925_mc_exit(mci);

	/* Unregister from the EDAC core, then release the mci itself. */
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
1029
/* Platform driver binding for the "cpc925_edac" platform device. */
static struct platform_driver cpc925_edac_driver = {
	.probe = cpc925_probe,
	.remove = cpc925_remove,
	.driver = {
		   .name = "cpc925_edac",
	}
};
1037
1038 static int __init cpc925_edac_init(void)
1039 {
1040         int ret = 0;
1041
1042         printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
1043         printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
1044
1045         /* Only support POLL mode so far */
1046         edac_op_state = EDAC_OPSTATE_POLL;
1047
1048         ret = platform_driver_register(&cpc925_edac_driver);
1049         if (ret) {
1050                 printk(KERN_WARNING "Failed to register %s\n",
1051                         CPC925_EDAC_MOD_STR);
1052         }
1053
1054         return ret;
1055 }
1056
/* Module exit point: unregister the platform driver. */
static void __exit cpc925_edac_exit(void)
{
	platform_driver_unregister(&cpc925_edac_driver);
}
1061
/* Module registration hooks and standard metadata. */
module_init(cpc925_edac_init);
module_exit(cpc925_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");