2 * Synopsys DDR ECC Driver
3 * This driver is based on ppc4xx_edac.c drivers
5 * Copyright (C) 2012 - 2014 Xilinx, Inc.
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
22 #include <linux/edac.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/interrupt.h>
27 #include <linux/of_device.h>
29 #include "edac_module.h"
31 /* Number of cs_rows needed per memory controller */
32 #define SYNPS_EDAC_NR_CSROWS 1
34 /* Number of channels per memory controller */
35 #define SYNPS_EDAC_NR_CHANS 1
37 /* Granularity of reported error in bytes */
38 #define SYNPS_EDAC_ERR_GRAIN 1
40 #define SYNPS_EDAC_MSG_SIZE 256
42 #define SYNPS_EDAC_MOD_STRING "synps_edac"
43 #define SYNPS_EDAC_MOD_VER "1"
45 /* Synopsys DDR memory controller registers that are relevant to ECC */
47 #define T_ZQ_OFST 0xA4
49 /* ECC control register */
50 #define ECC_CTRL_OFST 0xC4
51 /* ECC log register */
52 #define CE_LOG_OFST 0xC8
53 /* ECC address register */
54 #define CE_ADDR_OFST 0xCC
55 /* ECC data[31:0] register */
56 #define CE_DATA_31_0_OFST 0xD0
58 /* Uncorrectable error info registers */
59 #define UE_LOG_OFST 0xDC
60 #define UE_ADDR_OFST 0xE0
61 #define UE_DATA_31_0_OFST 0xE4
63 #define STAT_OFST 0xF0
64 #define SCRUB_OFST 0xF4
66 /* Control register bit field definitions */
67 #define CTRL_BW_MASK 0xC
68 #define CTRL_BW_SHIFT 2
70 #define DDRCTL_WDTH_16 1
71 #define DDRCTL_WDTH_32 0
73 /* ZQ register bit field definitions */
74 #define T_ZQ_DDRMODE_MASK 0x2
76 /* ECC control register bit field definitions */
77 #define ECC_CTRL_CLR_CE_ERR 0x2
78 #define ECC_CTRL_CLR_UE_ERR 0x1
80 /* ECC correctable/uncorrectable error log register definitions */
82 #define CE_LOG_BITPOS_MASK 0xFE
83 #define CE_LOG_BITPOS_SHIFT 1
85 /* ECC correctable/uncorrectable error address register definitions */
86 #define ADDR_COL_MASK 0xFFF
87 #define ADDR_ROW_MASK 0xFFFF000
88 #define ADDR_ROW_SHIFT 12
89 #define ADDR_BANK_MASK 0x70000000
90 #define ADDR_BANK_SHIFT 28
92 /* ECC statistic register definitions */
93 #define STAT_UECNT_MASK 0xFF
94 #define STAT_CECNT_MASK 0xFF00
95 #define STAT_CECNT_SHIFT 8
97 /* ECC scrub register definitions */
98 #define SCRUB_MODE_MASK 0x7
99 #define SCRUB_MODE_SECDED 0x4
102 #define DDR_ECC_INTR_SUPPORT BIT(0)
103 #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
104 #define DDR_ECC_INTR_SELF_CLEAR BIT(2)
106 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
107 /* ECC Configuration Registers */
108 #define ECC_CFG0_OFST 0x70
109 #define ECC_CFG1_OFST 0x74
111 /* ECC Status Register */
112 #define ECC_STAT_OFST 0x78
114 /* ECC Clear Register */
115 #define ECC_CLR_OFST 0x7C
117 /* ECC Error count Register */
118 #define ECC_ERRCNT_OFST 0x80
120 /* ECC Corrected Error Address Register */
121 #define ECC_CEADDR0_OFST 0x84
122 #define ECC_CEADDR1_OFST 0x88
124 /* ECC Syndrome Registers */
125 #define ECC_CSYND0_OFST 0x8C
126 #define ECC_CSYND1_OFST 0x90
127 #define ECC_CSYND2_OFST 0x94
129 /* ECC Bit Mask0 Address Register */
130 #define ECC_BITMASK0_OFST 0x98
131 #define ECC_BITMASK1_OFST 0x9C
132 #define ECC_BITMASK2_OFST 0xA0
134 /* ECC UnCorrected Error Address Register */
135 #define ECC_UEADDR0_OFST 0xA4
136 #define ECC_UEADDR1_OFST 0xA8
138 /* ECC Uncorrected Error Syndrome Registers */
139 #define ECC_UESYND0_OFST 0xAC
140 #define ECC_UESYND1_OFST 0xB0
141 #define ECC_UESYND2_OFST 0xB4
143 /* ECC Poison Address Reg */
144 #define ECC_POISON0_OFST 0xB8
145 #define ECC_POISON1_OFST 0xBC
147 #define ECC_ADDRMAP0_OFFSET 0x200
149 /* Control register bitfield definitions */
150 #define ECC_CTRL_BUSWIDTH_MASK 0x3000
151 #define ECC_CTRL_BUSWIDTH_SHIFT 12
152 #define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
153 #define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
155 /* DDR Control Register width definitions */
156 #define DDRCTL_EWDTH_16 2
157 #define DDRCTL_EWDTH_32 1
158 #define DDRCTL_EWDTH_64 0
160 /* ECC status register definitions */
161 #define ECC_STAT_UECNT_MASK 0xF0000
162 #define ECC_STAT_UECNT_SHIFT 16
163 #define ECC_STAT_CECNT_MASK 0xF00
164 #define ECC_STAT_CECNT_SHIFT 8
165 #define ECC_STAT_BITNUM_MASK 0x7F
167 /* ECC error count register definitions */
168 #define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
169 #define ECC_ERRCNT_UECNT_SHIFT 16
170 #define ECC_ERRCNT_CECNT_MASK 0xFFFF
172 /* DDR QOS Interrupt register definitions */
173 #define DDR_QOS_IRQ_STAT_OFST 0x20200
174 #define DDR_QOSUE_MASK 0x4
175 #define DDR_QOSCE_MASK 0x2
176 #define ECC_CE_UE_INTR_MASK 0x6
177 #define DDR_QOS_IRQ_EN_OFST 0x20208
178 #define DDR_QOS_IRQ_DB_OFST 0x2020C
180 /* DDR ECC UE/CE interrupt enable bits (written to ECC_CLR_OFST on v3.0 self-clearing controllers) */
181 #define DDR_UE_MASK BIT(9)
182 #define DDR_CE_MASK BIT(8)
184 /* ECC Corrected Error Register Mask and Shifts*/
185 #define ECC_CEADDR0_RW_MASK 0x3FFFF
186 #define ECC_CEADDR0_RNK_MASK BIT(24)
187 #define ECC_CEADDR1_BNKGRP_MASK 0x3000000
188 #define ECC_CEADDR1_BNKNR_MASK 0x70000
189 #define ECC_CEADDR1_BLKNR_MASK 0xFFF
190 #define ECC_CEADDR1_BNKGRP_SHIFT 24
191 #define ECC_CEADDR1_BNKNR_SHIFT 16
193 /* ECC Poison register shifts */
194 #define ECC_POISON0_RANK_SHIFT 24
195 #define ECC_POISON0_RANK_MASK BIT(24)
196 #define ECC_POISON0_COLUMN_SHIFT 0
197 #define ECC_POISON0_COLUMN_MASK 0xFFF
198 #define ECC_POISON1_BG_SHIFT 28
199 #define ECC_POISON1_BG_MASK 0x30000000
200 #define ECC_POISON1_BANKNR_SHIFT 24
201 #define ECC_POISON1_BANKNR_MASK 0x7000000
202 #define ECC_POISON1_ROW_SHIFT 0
203 #define ECC_POISON1_ROW_MASK 0x3FFFF
205 /* DDR Memory type defines */
206 #define MEM_TYPE_DDR3 0x1
207 #define MEM_TYPE_LPDDR3 0x8
208 #define MEM_TYPE_DDR2 0x4
209 #define MEM_TYPE_DDR4 0x10
210 #define MEM_TYPE_LPDDR4 0x20
212 /* DDRC Software control register */
213 #define DDRC_SWCTL 0x320
215 /* DDRC ECC CE & UE poison mask */
216 #define ECC_CEPOISON_MASK 0x3
217 #define ECC_UEPOISON_MASK 0x1
219 /* DDRC Device config masks */
220 #define DDRC_MSTR_CFG_MASK 0xC0000000
221 #define DDRC_MSTR_CFG_SHIFT 30
222 #define DDRC_MSTR_CFG_X4_MASK 0x0
223 #define DDRC_MSTR_CFG_X8_MASK 0x1
224 #define DDRC_MSTR_CFG_X16_MASK 0x2
225 #define DDRC_MSTR_CFG_X32_MASK 0x3
227 #define DDR_MAX_ROW_SHIFT 18
228 #define DDR_MAX_COL_SHIFT 14
229 #define DDR_MAX_BANK_SHIFT 3
230 #define DDR_MAX_BANKGRP_SHIFT 2
232 #define ROW_MAX_VAL_MASK 0xF
233 #define COL_MAX_VAL_MASK 0xF
234 #define BANK_MAX_VAL_MASK 0x1F
235 #define BANKGRP_MAX_VAL_MASK 0x1F
236 #define RANK_MAX_VAL_MASK 0x1F
238 #define ROW_B0_BASE 6
239 #define ROW_B1_BASE 7
240 #define ROW_B2_BASE 8
241 #define ROW_B3_BASE 9
242 #define ROW_B4_BASE 10
243 #define ROW_B5_BASE 11
244 #define ROW_B6_BASE 12
245 #define ROW_B7_BASE 13
246 #define ROW_B8_BASE 14
247 #define ROW_B9_BASE 15
248 #define ROW_B10_BASE 16
249 #define ROW_B11_BASE 17
250 #define ROW_B12_BASE 18
251 #define ROW_B13_BASE 19
252 #define ROW_B14_BASE 20
253 #define ROW_B15_BASE 21
254 #define ROW_B16_BASE 22
255 #define ROW_B17_BASE 23
257 #define COL_B2_BASE 2
258 #define COL_B3_BASE 3
259 #define COL_B4_BASE 4
260 #define COL_B5_BASE 5
261 #define COL_B6_BASE 6
262 #define COL_B7_BASE 7
263 #define COL_B8_BASE 8
264 #define COL_B9_BASE 9
265 #define COL_B10_BASE 10
266 #define COL_B11_BASE 11
267 #define COL_B12_BASE 12
268 #define COL_B13_BASE 13
270 #define BANK_B0_BASE 2
271 #define BANK_B1_BASE 3
272 #define BANK_B2_BASE 4
274 #define BANKGRP_B0_BASE 2
275 #define BANKGRP_B1_BASE 3
277 #define RANK_B0_BASE 6
280 * struct ecc_error_info - ECC error log information.
 * @row: Row number (filled from the CE/UE address registers by the
 *       get_error_info() callbacks).
282 * @col: Column number.
283 * @bank: Bank number.
284 * @bitpos: Bit position.
285 * @data: Data causing the error.
286 * @bankgrpnr: Bank group number.
287 * @blknr: Block number.
289 struct ecc_error_info {
300 * struct synps_ecc_status - ECC status information to report.
301 * @ce_cnt: Correctable error count.
302 * @ue_cnt: Uncorrectable error count.
303 * @ceinfo: Correctable error log information.
304 * @ueinfo: Uncorrectable error log information.
/* One snapshot of controller error state, consumed by handle_error(). */
306 struct synps_ecc_status {
309 struct ecc_error_info ceinfo;
310 struct ecc_error_info ueinfo;
314 * struct synps_edac_priv - DDR memory controller private instance data.
315 * @baseaddr: Base address of the DDR controller.
316 * @message: Buffer for framing the event specific info.
317 * @stat: ECC status information.
318 * @p_data: Platform data.
319 * @ce_cnt: Correctable Error count.
320 * @ue_cnt: Uncorrectable Error count.
321 * @poison_addr: Data poison address.
322 * @row_shift: Bit shifts for row bit.
323 * @col_shift: Bit shifts for column bit.
324 * @bank_shift: Bit shifts for bank bit.
325 * @bankgrp_shift: Bit shifts for bank group bit.
326 * @rank_shift: Bit shifts for rank bit.
328 struct synps_edac_priv {
329 void __iomem *baseaddr;
330 char message[SYNPS_EDAC_MSG_SIZE];
331 struct synps_ecc_status stat;
332 const struct synps_platform_data *p_data;
/* Error-injection state below is only compiled in for debug builds; the
 * *_shift maps are populated by the setup_*_address_map() helpers. */
335 #ifdef CONFIG_EDAC_DEBUG
340 u32 bankgrp_shift[2];
346 * struct synps_platform_data - synps platform data structure.
347 * @get_error_info: Get EDAC error info.
348 * @get_mtype: Get mtype.
349 * @get_dtype: Get dtype.
350 * @get_ecc_state: Get ECC state.
351 * @quirks: To differentiate IPs.
/* Per-SoC operations: the zynq_* callbacks serve the Zynq DDRC, the
 * zynqmp_* callbacks serve ZynqMP and the Synopsys v3.x controller. */
353 struct synps_platform_data {
354 int (*get_error_info)(struct synps_edac_priv *priv);
355 enum mem_type (*get_mtype)(const void __iomem *base);
356 enum dev_type (*get_dtype)(const void __iomem *base);
357 bool (*get_ecc_state)(void __iomem *base);
362 * zynq_get_error_info - Get the current ECC error info.
363 * @priv: DDR memory controller private instance data.
365 * Return: one if there is no error, otherwise zero.
367 static int zynq_get_error_info(struct synps_edac_priv *priv)
369 struct synps_ecc_status *p;
370 u32 regval, clearval = 0;
373 base = priv->baseaddr;
/* Raw CE/UE counters live in the same status register. */
376 regval = readl(base + STAT_OFST);
380 p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
381 p->ue_cnt = regval & STAT_UECNT_MASK;
/* Correctable error: decode bit position plus failing row/col/bank/data. */
383 regval = readl(base + CE_LOG_OFST);
384 if (!(p->ce_cnt && (regval & LOG_VALID)))
387 p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
388 regval = readl(base + CE_ADDR_OFST);
389 p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
390 p->ceinfo.col = regval & ADDR_COL_MASK;
391 p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
392 p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
393 edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
395 clearval = ECC_CTRL_CLR_CE_ERR;
/* Uncorrectable error: same address decode, but no single bit position. */
398 regval = readl(base + UE_LOG_OFST);
399 if (!(p->ue_cnt && (regval & LOG_VALID)))
402 regval = readl(base + UE_ADDR_OFST);
403 p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
404 p->ueinfo.col = regval & ADDR_COL_MASK;
405 p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
406 p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
407 clearval |= ECC_CTRL_CLR_UE_ERR;
/* Write-then-zero pulses the clear bits so capture re-arms for the
 * next error. */
410 writel(clearval, base + ECC_CTRL_OFST);
411 writel(0x0, base + ECC_CTRL_OFST);
417 * zynqmp_get_error_info - Get the current ECC error info.
418 * @priv: DDR memory controller private instance data.
420 * Return: one if there is no error otherwise returns zero.
422 static int zynqmp_get_error_info(struct synps_edac_priv *priv)
424 struct synps_ecc_status *p;
425 u32 regval, clearval = 0;
428 base = priv->baseaddr;
/* Dedicated 16-bit CE / UE counters, unlike the Zynq 8-bit STAT fields. */
431 regval = readl(base + ECC_ERRCNT_OFST);
432 p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
433 p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
437 regval = readl(base + ECC_STAT_OFST);
/* Correctable error: failing bit number plus row/bank/bank-group/block. */
441 p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
443 regval = readl(base + ECC_CEADDR0_OFST);
444 p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
445 regval = readl(base + ECC_CEADDR1_OFST);
446 p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
447 ECC_CEADDR1_BNKNR_SHIFT;
448 p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
449 ECC_CEADDR1_BNKGRP_SHIFT;
450 p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
451 p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
452 edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
453 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
454 readl(base + ECC_CSYND2_OFST));
/* Uncorrectable error: same decode from the UE address/syndrome regs. */
459 regval = readl(base + ECC_UEADDR0_OFST);
460 p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
461 regval = readl(base + ECC_UEADDR1_OFST);
462 p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
463 ECC_CEADDR1_BNKGRP_SHIFT;
464 p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
465 ECC_CEADDR1_BNKNR_SHIFT;
466 p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
467 p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
/* Clear both the sticky error flags and the error counters, then
 * write zero to de-assert the clear request. */
469 clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
470 clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
471 writel(clearval, base + ECC_CLR_OFST);
472 writel(0x0, base + ECC_CLR_OFST);
478 * handle_error - Handle Correctable and Uncorrectable errors.
479 * @mci: EDAC memory controller instance.
480 * @p: Synopsys ECC status structure.
482 * Handles ECC correctable and uncorrectable errors.
484 static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
486 struct synps_edac_priv *priv = mci->pvt_info;
487 struct ecc_error_info *pinf;
/* Interrupt-capable IPs (ZynqMP/v3) log bank group + block number;
 * the polled Zynq IP logs a column address instead. */
491 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
492 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
493 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
494 "CE", pinf->row, pinf->bank,
495 pinf->bankgrpnr, pinf->blknr,
496 pinf->bitpos, pinf->data);
498 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
499 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
500 "CE", pinf->row, pinf->bank, pinf->col,
501 pinf->bitpos, pinf->data);
504 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
505 p->ce_cnt, 0, 0, 0, 0, 0, -1,
/* Uncorrectable errors carry no bit position / data payload. */
511 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
512 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
513 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
514 "UE", pinf->row, pinf->bank,
515 pinf->bankgrpnr, pinf->blknr);
517 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
518 "DDR ECC error type :%s Row %d Bank %d Col %d ",
519 "UE", pinf->row, pinf->bank, pinf->col);
522 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
523 p->ue_cnt, 0, 0, 0, 0, 0, -1,
/* Reset the snapshot so stale info is never re-reported. */
527 memset(p, 0, sizeof(*p));
531 * intr_handler - Interrupt Handler for ECC interrupts.
533 * @dev_id: Device ID.
535 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
537 static irqreturn_t intr_handler(int irq, void *dev_id)
539 const struct synps_platform_data *p_data;
540 struct mem_ctl_info *mci = dev_id;
541 struct synps_edac_priv *priv;
544 priv = mci->pvt_info;
545 p_data = priv->p_data;
548 * v3.0 of the controller has the ce/ue bits cleared automatically,
549 * so this condition does not apply.
/* Older IPs: check the QOS interrupt status register and bail out
 * (shared IRQ) when neither CE nor UE is pending. */
551 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
552 regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
553 regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
554 if (!(regval & ECC_CE_UE_INTR_MASK))
/* Per the get_error_info() contract, non-zero status means no error. */
558 status = p_data->get_error_info(priv);
/* Accumulate running totals and report the event to the EDAC core. */
562 priv->ce_cnt += priv->stat.ce_cnt;
563 priv->ue_cnt += priv->stat.ue_cnt;
564 handle_error(mci, &priv->stat);
566 edac_dbg(3, "Total error count CE %d UE %d\n",
567 priv->ce_cnt, priv->ue_cnt);
568 /* v3.0 of the controller does not have this register */
569 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
570 writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
575 * check_errors - Check controller for ECC errors.
576 * @mci: EDAC memory controller instance.
578 * Check and post ECC errors. Called by the polling thread.
580 static void check_errors(struct mem_ctl_info *mci)
582 const struct synps_platform_data *p_data;
583 struct synps_edac_priv *priv;
586 priv = mci->pvt_info;
587 p_data = priv->p_data;
/* Same snapshot-accumulate-report sequence as intr_handler(), minus
 * the interrupt status handling; used when polling (Zynq). */
589 status = p_data->get_error_info(priv);
593 priv->ce_cnt += priv->stat.ce_cnt;
594 priv->ue_cnt += priv->stat.ue_cnt;
595 handle_error(mci, &priv->stat);
597 edac_dbg(3, "Total error count CE %d UE %d\n",
598 priv->ce_cnt, priv->ue_cnt);
602 * zynq_get_dtype - Return the controller memory width.
603 * @base: DDR memory controller base address.
605 * Get the EDAC device type width appropriate for the current controller
608 * Return: a device type width enumeration.
610 static enum dev_type zynq_get_dtype(const void __iomem *base)
/* Bus-width field of the control register selects the dev_type. */
615 width = readl(base + CTRL_OFST);
616 width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
633 * zynqmp_get_dtype - Return the controller memory width.
634 * @base: DDR memory controller base address.
636 * Get the EDAC device type width appropriate for the current controller
639 * Return: a device type width enumeration.
641 static enum dev_type zynqmp_get_dtype(const void __iomem *base)
/* ZynqMP encodes the width in a different field and supports 64-bit. */
646 width = readl(base + CTRL_OFST);
647 width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
649 case DDRCTL_EWDTH_16:
652 case DDRCTL_EWDTH_32:
655 case DDRCTL_EWDTH_64:
666 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
667 * @base: DDR memory controller base address.
669 * Get the ECC enable/disable status of the controller.
671 * Return: true if enabled, otherwise false.
673 static bool zynq_get_ecc_state(void __iomem *base)
678 dt = zynq_get_dtype(base);
679 if (dt == DEV_UNKNOWN)
/* ECC is usable only in SEC/DED scrub mode with a x2 device width. */
682 ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
683 if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
690 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
691 * @base: DDR memory controller base address.
693 * Get the ECC enable/disable status for the controller.
695 * Return: a ECC status boolean i.e true/false - enabled/disabled.
697 static bool zynqmp_get_ecc_state(void __iomem *base)
702 dt = zynqmp_get_dtype(base);
703 if (dt == DEV_UNKNOWN)
/* Wider device support than Zynq: x2, x4 or x8 all qualify. */
706 ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
707 if ((ecctype == SCRUB_MODE_SECDED) &&
708 ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
715 * get_memsize - Read the size of the attached memory device.
717 * Return: the memory size in bytes.
719 static u32 get_memsize(void)
/* NOTE(review): totalram * mem_unit can exceed 32 bits on systems with
 * >= 4 GiB of RAM while the return type is u32 — confirm whether this
 * truncation is acceptable for the csrow page accounting. */
725 return inf.totalram * inf.mem_unit;
729 * zynq_get_mtype - Return the controller memory type.
730 * @base: DDR memory controller base address.
732 * Get the EDAC memory type appropriate for the current controller
735 * Return: a memory type enumeration.
737 static enum mem_type zynq_get_mtype(const void __iomem *base)
/* The ZQ register's DDR-mode bit distinguishes DDR3 from DDR2. */
742 memtype = readl(base + T_ZQ_OFST);
744 if (memtype & T_ZQ_DDRMODE_MASK)
753 * zynqmp_get_mtype - Returns controller memory type.
754 * @base: DDR memory controller base address.
756 * Get the EDAC memory type appropriate for the current controller
759 * Return: a memory type enumeration.
761 static enum mem_type zynqmp_get_mtype(const void __iomem *base)
/* The control register carries one-hot memory-type bits (DDR2/3/4,
 * LPDDR3/4); map them onto the EDAC mem_type enumeration. */
766 memtype = readl(base + CTRL_OFST);
768 if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
770 else if (memtype & MEM_TYPE_DDR2)
772 else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
781 * init_csrows - Initialize the csrow data.
782 * @mci: EDAC memory controller instance.
784 * Initialize the chip select rows associated with the EDAC memory
785 * controller instance.
787 static void init_csrows(struct mem_ctl_info *mci)
789 struct synps_edac_priv *priv = mci->pvt_info;
790 const struct synps_platform_data *p_data;
791 struct csrow_info *csi;
792 struct dimm_info *dimm;
796 p_data = priv->p_data;
798 for (row = 0; row < mci->nr_csrows; row++) {
799 csi = mci->csrows[row];
/* Total system RAM is attributed to this controller and split
 * evenly (in pages) across the row's channels. */
800 size = get_memsize();
802 for (j = 0; j < csi->nr_channels; j++) {
803 dimm = csi->channels[j]->dimm;
804 dimm->edac_mode = EDAC_SECDED;
805 dimm->mtype = p_data->get_mtype(priv->baseaddr);
806 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
807 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
808 dimm->dtype = p_data->get_dtype(priv->baseaddr);
814 * mc_init - Initialize one driver instance.
815 * @mci: EDAC memory controller instance.
816 * @pdev: platform device.
818 * Perform initialization of the EDAC memory controller instance and
819 * related driver-private data associated with the memory controller the
820 * instance is bound to.
822 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
824 struct synps_edac_priv *priv;
826 mci->pdev = &pdev->dev;
827 priv = mci->pvt_info;
828 platform_set_drvdata(pdev, mci);
830 /* Initialize controller capabilities and configuration */
831 mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
832 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
833 mci->scrub_cap = SCRUB_HW_SRC;
834 mci->scrub_mode = SCRUB_NONE;
836 mci->edac_cap = EDAC_FLAG_SECDED;
837 mci->ctl_name = "synps_ddr_controller";
838 mci->dev_name = SYNPS_EDAC_MOD_STRING;
839 mci->mod_name = SYNPS_EDAC_MOD_VER;
/* Interrupt-capable IPs report via intr_handler(); otherwise fall
 * back to the EDAC polling thread calling check_errors(). */
841 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
842 edac_op_state = EDAC_OPSTATE_INT;
844 edac_op_state = EDAC_OPSTATE_POLL;
845 mci->edac_check = check_errors;
848 mci->ctl_page_to_phys = NULL;
/* Enable CE/UE interrupt delivery: v3.0 self-clearing controllers arm
 * them via the ECC clear register, older IPs via the QOS enable reg. */
853 static void enable_intr(struct synps_edac_priv *priv)
855 /* Enable UE/CE Interrupts */
856 if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
857 writel(DDR_UE_MASK | DDR_CE_MASK,
858 priv->baseaddr + ECC_CLR_OFST);
860 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
861 priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
/* Disable CE/UE interrupt delivery via the QOS disable register.
 * NOTE(review): unlike enable_intr() there is no DDR_ECC_INTR_SELF_CLEAR
 * branch here — confirm whether v3.0 controllers need a matching
 * disable path through ECC_CLR_OFST. */
865 static void disable_intr(struct synps_edac_priv *priv)
867 /* Disable UE/CE Interrupts */
868 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
869 priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
/* Look up the controller IRQ from the device tree and install
 * intr_handler(); the request is devm-managed, so teardown is
 * automatic on driver detach. Returns 0 on success. */
872 static int setup_irq(struct mem_ctl_info *mci,
873 struct platform_device *pdev)
875 struct synps_edac_priv *priv = mci->pvt_info;
878 irq = platform_get_irq(pdev, 0);
880 edac_printk(KERN_ERR, EDAC_MC,
881 "No IRQ %d in DT\n", irq);
885 ret = devm_request_irq(&pdev->dev, irq, intr_handler,
886 0, dev_name(&pdev->dev), mci);
888 edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
/* Zynq DDRC: polled operation, no quirk flags. */
897 static const struct synps_platform_data zynq_edac_def = {
898 .get_error_info = zynq_get_error_info,
899 .get_mtype = zynq_get_mtype,
900 .get_dtype = zynq_get_dtype,
901 .get_ecc_state = zynq_get_ecc_state,
/* ZynqMP DDRC: interrupt-driven; data poisoning only in debug builds. */
905 static const struct synps_platform_data zynqmp_edac_def = {
906 .get_error_info = zynqmp_get_error_info,
907 .get_mtype = zynqmp_get_mtype,
908 .get_dtype = zynqmp_get_dtype,
909 .get_ecc_state = zynqmp_get_ecc_state,
910 .quirks = (DDR_ECC_INTR_SUPPORT
911 #ifdef CONFIG_EDAC_DEBUG
912 | DDR_ECC_DATA_POISON_SUPPORT
/* Synopsys DDRC v3.80a: like ZynqMP but the interrupt status bits
 * self-clear, hence the extra DDR_ECC_INTR_SELF_CLEAR quirk. */
917 static const struct synps_platform_data synopsys_edac_def = {
918 .get_error_info = zynqmp_get_error_info,
919 .get_mtype = zynqmp_get_mtype,
920 .get_dtype = zynqmp_get_dtype,
921 .get_ecc_state = zynqmp_get_ecc_state,
922 .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
923 #ifdef CONFIG_EDAC_DEBUG
924 | DDR_ECC_DATA_POISON_SUPPORT
/* Device-tree match table binding each compatible to its ops. */
930 static const struct of_device_id synps_edac_match[] = {
932 .compatible = "xlnx,zynq-ddrc-a05",
933 .data = (void *)&zynq_edac_def
936 .compatible = "xlnx,zynqmp-ddrc-2.40a",
937 .data = (void *)&zynqmp_edac_def
940 .compatible = "snps,ddrc-3.80a",
941 .data = (void *)&synopsys_edac_def
948 MODULE_DEVICE_TABLE(of, synps_edac_match);
950 #ifdef CONFIG_EDAC_DEBUG
951 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
954 * ddr_poison_setup - Update poison registers.
955 * @priv: DDR memory controller private instance data.
957 * Update poison registers as per DDR mapping.
960 static void ddr_poison_setup(struct synps_edac_priv *priv)
962 int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
/* Convert the system poison address to a HIF address — presumably
 * 8-byte (64-bit bus) word addressing, hence the >> 3; confirm
 * against the DDRC spec. */
966 hif_addr = priv->poison_addr >> 3;
/* Re-assemble row/col/bank/bank-group/rank fields by picking HIF
 * address bits at the positions recorded by setup_*_address_map(). */
968 for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
969 if (priv->row_shift[index])
970 row |= (((hif_addr >> priv->row_shift[index]) &
976 for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
977 if (priv->col_shift[index] || index < 3)
978 col |= (((hif_addr >> priv->col_shift[index]) &
984 for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
985 if (priv->bank_shift[index])
986 bank |= (((hif_addr >> priv->bank_shift[index]) &
992 for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
993 if (priv->bankgrp_shift[index])
994 bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
1000 if (priv->rank_shift[0])
1001 rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
/* Program the assembled fields into the two ECC poison registers. */
1003 regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
1004 regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
1005 writel(regval, priv->baseaddr + ECC_POISON0_OFST);
1007 regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
1008 regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
1009 regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
1010 writel(regval, priv->baseaddr + ECC_POISON1_OFST);
/* sysfs read: dump the programmed poison registers and the last
 * injection address stored in priv->poison_addr. */
1013 static ssize_t inject_data_error_show(struct device *dev,
1014 struct device_attribute *mattr,
1017 struct mem_ctl_info *mci = to_mci(dev);
1018 struct synps_edac_priv *priv = mci->pvt_info;
1020 return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
1021 "Error injection Address: 0x%lx\n\r",
1022 readl(priv->baseaddr + ECC_POISON0_OFST),
1023 readl(priv->baseaddr + ECC_POISON1_OFST),
/* sysfs write: parse the injection address from user input and program
 * the poison registers accordingly via ddr_poison_setup(). */
1027 static ssize_t inject_data_error_store(struct device *dev,
1028 struct device_attribute *mattr,
1029 const char *data, size_t count)
1031 struct mem_ctl_info *mci = to_mci(dev);
1032 struct synps_edac_priv *priv = mci->pvt_info;
1034 if (kstrtoul(data, 0, &priv->poison_addr))
1037 ddr_poison_setup(priv);
/* sysfs read: report whether poisoning is configured to inject a
 * correctable (ECC_CFG1 low bits == 0x3) or uncorrectable error. */
1042 static ssize_t inject_data_poison_show(struct device *dev,
1043 struct device_attribute *mattr,
1046 struct mem_ctl_info *mci = to_mci(dev);
1047 struct synps_edac_priv *priv = mci->pvt_info;
1049 return sprintf(data, "Data Poisoning: %s\n\r",
1050 (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
1051 ? ("Correctable Error") : ("UnCorrectable Error"));
/* sysfs write: select CE ("CE") or UE (anything else) poison mode.
 * The SWCTL 0/1 writes bracket the ECC_CFG1 update — presumably
 * gating a quasi-dynamic register per the DDRC programming model;
 * confirm against the controller spec. */
1054 static ssize_t inject_data_poison_store(struct device *dev,
1055 struct device_attribute *mattr,
1056 const char *data, size_t count)
1058 struct mem_ctl_info *mci = to_mci(dev);
1059 struct synps_edac_priv *priv = mci->pvt_info;
1061 writel(0, priv->baseaddr + DDRC_SWCTL);
1062 if (strncmp(data, "CE", 2) == 0)
1063 writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1065 writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1066 writel(1, priv->baseaddr + DDRC_SWCTL);
1071 static DEVICE_ATTR_RW(inject_data_error);
1072 static DEVICE_ATTR_RW(inject_data_poison);
/* Register the two debug injection attributes on the MC device. */
1074 static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1078 rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1081 rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
/* Mirror of edac_create_sysfs_attributes() for teardown. */
1087 static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1089 device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1090 device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
/* Decode ADDRMAP registers 5/6/7/9/10/11 into priv->row_shift[]: the
 * HIF address bit feeding each row bit. A field equal to
 * ROW_MAX_VAL_MASK means "row bit unused" and maps to shift 0. */
1093 static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1095 u32 addrmap_row_b2_10;
1098 priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
1099 priv->row_shift[1] = ((addrmap[5] >> 8) &
1100 ROW_MAX_VAL_MASK) + ROW_B1_BASE;
/* Rows 2-10 either share one ADDRMAP5 field or are specified
 * individually in ADDRMAP9/10/11. */
1102 addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
1103 if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
1104 for (index = 2; index < 11; index++)
1105 priv->row_shift[index] = addrmap_row_b2_10 +
1106 index + ROW_B0_BASE;
1109 priv->row_shift[2] = (addrmap[9] &
1110 ROW_MAX_VAL_MASK) + ROW_B2_BASE;
1111 priv->row_shift[3] = ((addrmap[9] >> 8) &
1112 ROW_MAX_VAL_MASK) + ROW_B3_BASE;
1113 priv->row_shift[4] = ((addrmap[9] >> 16) &
1114 ROW_MAX_VAL_MASK) + ROW_B4_BASE;
1115 priv->row_shift[5] = ((addrmap[9] >> 24) &
1116 ROW_MAX_VAL_MASK) + ROW_B5_BASE;
1117 priv->row_shift[6] = (addrmap[10] &
1118 ROW_MAX_VAL_MASK) + ROW_B6_BASE;
1119 priv->row_shift[7] = ((addrmap[10] >> 8) &
1120 ROW_MAX_VAL_MASK) + ROW_B7_BASE;
1121 priv->row_shift[8] = ((addrmap[10] >> 16) &
1122 ROW_MAX_VAL_MASK) + ROW_B8_BASE;
1123 priv->row_shift[9] = ((addrmap[10] >> 24) &
1124 ROW_MAX_VAL_MASK) + ROW_B9_BASE;
1125 priv->row_shift[10] = (addrmap[11] &
1126 ROW_MAX_VAL_MASK) + ROW_B10_BASE;
1129 priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
1130 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
1131 ROW_MAX_VAL_MASK) + ROW_B11_BASE);
1132 priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
1133 ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
1134 ROW_MAX_VAL_MASK) + ROW_B12_BASE);
1135 priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
1136 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
1137 ROW_MAX_VAL_MASK) + ROW_B13_BASE);
1138 priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
1139 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
1140 ROW_MAX_VAL_MASK) + ROW_B14_BASE);
1141 priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
1142 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
1143 ROW_MAX_VAL_MASK) + ROW_B15_BASE);
1144 priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
1145 ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
1146 ROW_MAX_VAL_MASK) + ROW_B16_BASE);
1147 priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
1148 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
1149 ROW_MAX_VAL_MASK) + ROW_B17_BASE);
/* Decode ADDRMAP registers 2/3/4 into priv->col_shift[]. The layout of
 * the upper column bits depends on both the bus width (from the control
 * register) and whether the memory is LPDDR3; a field equal to
 * COL_MAX_VAL_MASK means the column bit is unused. */
1152 static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1157 memtype = readl(priv->baseaddr + CTRL_OFST);
1158 width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
/* Column bits 0 and 1 always map to HIF bits 0 and 1. */
1160 priv->col_shift[0] = 0;
1161 priv->col_shift[1] = 1;
1162 priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
1163 priv->col_shift[3] = ((addrmap[2] >> 8) &
1164 COL_MAX_VAL_MASK) + COL_B3_BASE;
1165 priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
1166 COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
1167 COL_MAX_VAL_MASK) + COL_B4_BASE);
1168 priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
1169 COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
1170 COL_MAX_VAL_MASK) + COL_B5_BASE);
1171 priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
1172 COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
1173 COL_MAX_VAL_MASK) + COL_B6_BASE);
1174 priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
1175 COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
1176 COL_MAX_VAL_MASK) + COL_B7_BASE);
1177 priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
1178 COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
1179 COL_MAX_VAL_MASK) + COL_B8_BASE);
1180 priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
1181 COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
1182 COL_MAX_VAL_MASK) + COL_B9_BASE);
/* Upper column bits: the source ADDRMAP field shifts with bus width,
 * and LPDDR3 uses col 10/11 where other types use col 11/13. */
1183 if (width == DDRCTL_EWDTH_64) {
1184 if (memtype & MEM_TYPE_LPDDR3) {
1185 priv->col_shift[10] = ((addrmap[4] &
1186 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1187 ((addrmap[4] & COL_MAX_VAL_MASK) +
1189 priv->col_shift[11] = (((addrmap[4] >> 8) &
1190 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1191 (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
1194 priv->col_shift[11] = ((addrmap[4] &
1195 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1196 ((addrmap[4] & COL_MAX_VAL_MASK) +
1198 priv->col_shift[13] = (((addrmap[4] >> 8) &
1199 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1200 (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
1203 } else if (width == DDRCTL_EWDTH_32) {
1204 if (memtype & MEM_TYPE_LPDDR3) {
1205 priv->col_shift[10] = (((addrmap[3] >> 24) &
1206 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1207 (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1209 priv->col_shift[11] = ((addrmap[4] &
1210 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1211 ((addrmap[4] & COL_MAX_VAL_MASK) +
1214 priv->col_shift[11] = (((addrmap[3] >> 24) &
1215 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1216 (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1218 priv->col_shift[13] = ((addrmap[4] &
1219 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1220 ((addrmap[4] & COL_MAX_VAL_MASK) +
1224 if (memtype & MEM_TYPE_LPDDR3) {
1225 priv->col_shift[10] = (((addrmap[3] >> 16) &
1226 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1227 (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
1229 priv->col_shift[11] = (((addrmap[3] >> 24) &
1230 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1231 (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1233 priv->col_shift[13] = ((addrmap[4] &
1234 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1235 ((addrmap[4] & COL_MAX_VAL_MASK) +
1238 priv->col_shift[11] = (((addrmap[3] >> 16) &
1239 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1240 (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
1242 priv->col_shift[13] = (((addrmap[3] >> 24) &
1243 COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1244 (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
/* Shift the map down by the width code so indices line up with the
 * half/quarter-bus column numbering; vacated entries become 0. */
1250 for (index = 9; index > width; index--) {
1251 priv->col_shift[index] = priv->col_shift[index - width];
1252 priv->col_shift[index - width] = 0;
/**
 * setup_bank_address_map - Set bank address map.
 * @priv:    DDR memory controller private instance data.
 * @addrmap: Pointer to the array of ADDRMAP register values.
 *
 * Decode the bank bit positions from ADDRMAP1.  Bank bits 0 and 1 are
 * always mapped (register field plus BANK_Bn_BASE); for bank bit 2 a
 * field equal to BANK_MAX_VAL_MASK (all ones) means "unused" and the
 * shift is set to 0.
 *
 * NOTE(review): the opening/closing braces of this function appear to
 * be among the lines dropped from this listing — verify against the
 * original source.
 */
1258 static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1260 priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
1261 priv->bank_shift[1] = ((addrmap[1] >> 8) &
1262 BANK_MAX_VAL_MASK) + BANK_B1_BASE;
1263 priv->bank_shift[2] = (((addrmap[1] >> 16) &
1264 BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
1265 (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
/**
 * setup_bg_address_map - Set bank group address map.
 * @priv:    DDR memory controller private instance data.
 * @addrmap: Pointer to the array of ADDRMAP register values.
 *
 * Decode the bank-group bit positions from ADDRMAP8.  Bank-group bit 0
 * is always mapped; for bit 1 a field equal to BANKGRP_MAX_VAL_MASK
 * (all ones) means "unused" and the shift is set to 0.
 *
 * NOTE(review): braces appear to be among the lines dropped from this
 * listing.
 */
1270 static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1272 priv->bankgrp_shift[0] = (addrmap[8] &
1273 BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
1274 priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
1275 BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
1276 & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
/**
 * setup_rank_address_map - Set rank address map.
 * @priv:    DDR memory controller private instance data.
 * @addrmap: Pointer to the array of ADDRMAP register values.
 *
 * Decode the rank bit position from ADDRMAP0.  A field equal to
 * RANK_MAX_VAL_MASK (all ones) means the rank bit is unused and the
 * shift is set to 0.
 *
 * NOTE(review): braces appear to be among the lines dropped from this
 * listing.
 */
1280 static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1282 priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
1283 RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
1284 RANK_MAX_VAL_MASK) + RANK_B0_BASE);
1288 * setup_address_map - Set Address Map by querying ADDRMAP registers.
1289 * @priv: DDR memory controller private instance data.
1291 * Set Address Map by querying ADDRMAP registers.
1295 static void setup_address_map(struct synps_edac_priv *priv)
/*
 * Read the twelve ADDRMAP registers (consecutive 32-bit registers
 * starting at ECC_ADDRMAP0_OFFSET) into a local array, then let the
 * per-field helpers decode row/column/bank/bank-group/rank bit
 * positions from it.
 */
1300 for (index = 0; index < 12; index++) {
1303 addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1304 addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1307 setup_row_address_map(priv, addrmap);
1309 setup_column_address_map(priv, addrmap);
1311 setup_bank_address_map(priv, addrmap);
1313 setup_bg_address_map(priv, addrmap);
1315 setup_rank_address_map(priv, addrmap);
/* Closes the debug-only section: these address-map helpers are built
 * only when CONFIG_EDAC_DEBUG is enabled (the matching #ifdef is above
 * this chunk). */
1317 #endif /* CONFIG_EDAC_DEBUG */
1320 * mc_probe - Check controller and bind driver.
1321 * @pdev: platform device.
1323 * Probe a specific controller instance for binding with the driver.
1325 * Return: 0 if the controller instance was successfully bound to the
1326 * driver; otherwise, < 0 on error.
1328 static int mc_probe(struct platform_device *pdev)
1330 const struct synps_platform_data *p_data;
1331 struct edac_mc_layer layers[2];
1332 struct synps_edac_priv *priv;
1333 struct mem_ctl_info *mci;
1334 void __iomem *baseaddr;
1335 struct resource *res;
/* NOTE(review): this listing has dropped lines throughout mc_probe
 * (e.g. the `int rc;` declaration, NULL checks and the goto-based
 * unwind path) — verify against the original source. */
/* Map the controller register space; devm_ioremap_resource() ties the
 * mapping's lifetime to the device. */
1338 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1339 baseaddr = devm_ioremap_resource(&pdev->dev, res);
1340 if (IS_ERR(baseaddr))
1341 return PTR_ERR(baseaddr);
/* Fetch the per-compatible platform data (register layout quirks and
 * callbacks) from the OF match table. */
1343 p_data = of_device_get_match_data(&pdev->dev);
/* Nothing to do if the controller was not configured with ECC; bail
 * out early (the error-return line appears dropped from this listing). */
1347 if (!p_data->get_ecc_state(baseaddr)) {
1348 edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
/* Describe the EDAC topology: one virtual chip-select row with a
 * single channel (SYNPS_EDAC_NR_CSROWS/SYNPS_EDAC_NR_CHANS are both 1). */
1352 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1353 layers[0].size = SYNPS_EDAC_NR_CSROWS;
1354 layers[0].is_virt_csrow = true;
1355 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1356 layers[1].size = SYNPS_EDAC_NR_CHANS;
1357 layers[1].is_virt_csrow = false;
/* Allocate the mem_ctl_info with room for our private state appended. */
1359 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1360 sizeof(struct synps_edac_priv));
1362 edac_printk(KERN_ERR, EDAC_MC,
1363 "Failed memory allocation for mc instance\n");
/* Wire up the driver-private state embedded in the MCI. */
1367 priv = mci->pvt_info;
1368 priv->baseaddr = baseaddr;
1369 priv->p_data = p_data;
/* Interrupt-capable controllers need their error IRQ registered before
 * the MCI is exposed to the EDAC core. */
1373 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1374 rc = setup_irq(mci, pdev);
/* Hand the controller instance to the EDAC core. */
1379 rc = edac_mc_add_mc(mci);
1381 edac_printk(KERN_ERR, EDAC_MC,
1382 "Failed to register with EDAC core\n");
/* Debug builds only: expose the data-poison sysfs knobs and decode the
 * DRAM address map for error-address reporting. */
1386 #ifdef CONFIG_EDAC_DEBUG
1387 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1388 rc = edac_create_sysfs_attributes(mci);
1390 edac_printk(KERN_ERR, EDAC_MC,
1391 "Failed to create sysfs entries\n");
1396 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1397 setup_address_map(priv);
1401 * Start capturing the correctable and uncorrectable errors. A write of
1402 * 0 starts the counters.
/* Only polled (non-interrupt) controllers need the explicit counter
 * kick via ECC_CTRL. */
1404 if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1405 writel(0x0, baseaddr + ECC_CTRL_OFST);
1416 * mc_remove - Unbind driver from controller.
1417 * @pdev: Platform device.
1419 * Return: Unconditionally 0
1421 static int mc_remove(struct platform_device *pdev)
/* Recover the MCI stashed in drvdata at probe time and our private
 * state embedded in it. */
1423 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1424 struct synps_edac_priv *priv = mci->pvt_info;
/* Quiesce error interrupts first on interrupt-capable controllers
 * (NOTE(review): the disable call itself appears dropped from this
 * listing — verify against the original source). */
1426 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
/* Debug builds: tear down the data-poison sysfs attributes created in
 * mc_probe(). */
1429 #ifdef CONFIG_EDAC_DEBUG
1430 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1431 edac_remove_sysfs_attributes(mci);
/* Unregister from the EDAC core (the matching edac_mc_free() line
 * appears dropped from this listing). */
1434 edac_mc_del_mc(&pdev->dev);
/*
 * Platform driver glue: bind by OF compatible via synps_edac_match.
 * NOTE(review): the `.driver = {` wrapper and the `.probe = mc_probe,`
 * line appear to be among the lines dropped from this listing.
 */
1440 static struct platform_driver synps_edac_mc_driver = {
1442 .name = "synopsys-edac",
1443 .of_match_table = synps_edac_match,
1446 .remove = mc_remove,
/* Standard module registration boilerplate for a platform driver. */
1449 module_platform_driver(synps_edac_mc_driver);
1451 MODULE_AUTHOR("Xilinx Inc");
1452 MODULE_DESCRIPTION("Synopsys DDR ECC driver");
1453 MODULE_LICENSE("GPL v2");