2 * Driver for Pondicherry2 memory controller.
4 * Copyright (c) 2016, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * [Derived from sb_edac.c]
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/pci_ids.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/edac.h>
34 #include <linux/mmzone.h>
35 #include <linux/smp.h>
36 #include <linux/bitmap.h>
37 #include <linux/math64.h>
38 #include <linux/mod_devicetable.h>
39 #include <asm/cpu_device_id.h>
40 #include <asm/intel-family.h>
41 #include <asm/processor.h>
45 #include "edac_module.h"
46 #include "pnd2_edac.h"
48 #define APL_NUM_CHANNELS 4
49 #define DNV_NUM_CHANNELS 2
50 #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
67 int dimm_geom[APL_NUM_CHANNELS];
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
80 static struct region {
/*
 * Per-platform operations vtable: one instance each for Apollo Lake
 * and Denverton selects the register-access and address-decode
 * routines at probe time (referenced through the file-scope "ops").
 * NOTE(review): several members (e.g. name/type, channel counts,
 * pmiaddr/pmiidx shifts) are not visible in this view of the file --
 * confirm against the full struct definition.
 */
86 static struct dunit_ops {
92 int dimms_per_channel;
/* Low-level register read (sideband mailbox or MMIO) for this platform */
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
/* Snapshot all platform-specific dunit registers at init time */
94 int (*get_registers)(void);
/* Verify ECC is active on every populated channel */
95 int (*check_ecc)(void);
/* Build an asymmetric-region descriptor from a platform register */
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
/* Second-stage decode: PMI address -> DIMM/rank/bank/row/column */
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
102 static struct mem_ctl_info *pnd2_mci;
104 #define PND2_MSG_SIZE 256
107 #define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
110 #define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
113 #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114 #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115 #define SELECTOR_DISABLED (-1)
116 #define _4GB (1ul << 32)
118 #define PMI_ADDRESS_WIDTH 31
119 #define PND_MAX_PHYS_BIT 39
121 #define APL_ASYMSHIFT 28
122 #define DNV_ASYMSHIFT 31
123 #define CH_HASH_MASK_LSB 6
124 #define SLICE_HASH_MASK_LSB 6
125 #define MOT_SLC_INTLV_BIT 12
126 #define LOG2_PMI_ADDR_GRANULARITY 5
129 #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130 #define U64_LSHIFT(val, s) ((u64)(val) << (s))
133 * On Apollo Lake we access memory controller registers via a
134 * side-band mailbox style interface in a hidden PCI device
135 * configuration space.
137 static struct pci_bus *p2sb_bus;
138 #define P2SB_DEVFN PCI_DEVFN(0xd, 0)
139 #define P2SB_ADDR_OFF 0xd0
140 #define P2SB_DATA_OFF 0xd4
141 #define P2SB_STAT_OFF 0xd8
142 #define P2SB_ROUT_OFF 0xda
143 #define P2SB_EADD_OFF 0xdc
144 #define P2SB_HIDE_OFF 0xe1
148 #define P2SB_READ(size, off, ptr) \
149 pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
150 #define P2SB_WRITE(size, off, val) \
151 pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
153 static bool p2sb_is_busy(u16 *status)
155 P2SB_READ(word, P2SB_STAT_OFF, status);
157 return !!(*status & P2SB_BUSY);
/*
 * Issue one 32-bit read through the P2SB sideband mailbox and return
 * the 2-bit completion status (0 on success).
 * NOTE(review): local declarations (status, hidden) and several
 * statements (error returns, the hidden-check conditionals) are not
 * visible in this view of the file.
 */
160 static int _apl_rd_reg(int port, int off, int op, u32 *data)
162 int retries = 0xff, ret;
166 /* Unhide the P2SB device, if it's hidden */
167 P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
169 P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
/* Bail out if a previous mailbox command is still pending */
171 if (p2sb_is_busy(&status)) {
/* Program target port/offset, clear data, then kick off the opcode */
176 P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
177 P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
178 P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
179 P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
180 P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
/* Poll for completion, bounded by the retry budget above */
182 while (p2sb_is_busy(&status)) {
183 if (retries-- == 0) {
189 P2SB_READ(dword, P2SB_DATA_OFF, data);
/* Bits [2:1] of the status word carry the completion code */
190 ret = (status >> 1) & 0x3;
192 /* Hide the P2SB device, if it was hidden before */
194 P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
/*
 * Apollo Lake rd_reg hook: read a 4- or 8-byte register through the
 * P2SB mailbox, upper dword first for 8-byte registers. Returns the
 * OR of the per-dword mailbox completion codes (0 on success).
 * NOTE(review): the declaration of "ret", the sz==8 conditional and
 * the return statement are not visible in this view.
 */
199 static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
203 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
/* High dword (offset + 4) is fetched first for 8-byte registers */
206 ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
209 ret |= _apl_rd_reg(port, off, op, (u32 *)data);
210 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
211 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
/*
 * Read the memory controller hub base (MCHBAR) from the Denverton
 * host bridge (PCI device 0x1980): two config dwords combined into a
 * 64-bit MMIO base.
 * NOTE(review): the pdev NULL check, pci_dev_put, and the enable-bit
 * test guarding the "disabled" message are not visible in this view.
 */
218 static u64 get_mem_ctrl_hub_base_addr(void)
220 struct b_cr_mchbar_lo_pci lo;
221 struct b_cr_mchbar_hi_pci hi;
222 struct pci_dev *pdev;
224 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
226 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
227 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
234 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
/* hi.base supplies bits [63:32]; lo.base is 32KB-aligned (bit 15 up) */
238 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
/*
 * Return the sideband register MMIO base from the Denverton P2SB
 * bridge (PCI device 0x19dd) BAR, temporarily unhiding the device
 * (config byte 0xe1) so its config space can be read.
 * NOTE(review): the pdev NULL check, local declarations (hidden, lo,
 * hi) and the fallback path are not visible in this view.
 */
241 static u64 get_sideband_reg_base_addr(void)
243 struct pci_dev *pdev;
247 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
249 /* Unhide the P2SB device, if it's hidden */
250 pci_read_config_byte(pdev, 0xe1, &hidden);
252 pci_write_config_byte(pdev, 0xe1, 0);
/* Standard BAR low/high dwords at config offsets 0x10/0x14 */
254 pci_read_config_dword(pdev, 0x10, &lo);
255 pci_read_config_dword(pdev, 0x14, &hi);
258 /* Hide the P2SB device, if it was hidden before */
260 pci_write_config_byte(pdev, 0xe1, hidden);
263 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
/*
 * Denverton rd_reg hook. Registers with op==4 live in PCI config
 * space of device 0x1980; all others are MMIO, either below MCHBAR
 * (op==0, port 0x4c) or below the sideband base plus a 64KB window
 * per port.
 * NOTE(review): local declarations (addr, base), error checks,
 * ioremap-failure handling and the iounmap are not visible in this
 * view of the file.
 */
269 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
271 struct pci_dev *pdev;
276 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
280 pci_read_config_dword(pdev, off, data);
283 /* MMIO via memory controller hub base address */
284 if (op == 0 && port == 0x4c) {
285 addr = get_mem_ctrl_hub_base_addr();
289 /* MMIO via sideband register base address */
290 addr = get_sideband_reg_base_addr();
/* Each sideband port occupies its own 64KB window */
293 addr += (port << 16);
296 base = ioremap((resource_size_t)addr, 0x10000);
301 *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
302 *(u32 *)data = *(u32 *)(base + off);
307 edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
308 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
313 #define RD_REGP(regp, regname, port) \
316 regname##_r_opcode, \
317 regp, sizeof(struct regname), \
320 #define RD_REG(regp, regname) \
321 ops->rd_reg(regname ## _port, \
323 regname##_r_opcode, \
324 regp, sizeof(struct regname), \
327 static u64 top_lm, top_hm;
328 static bool two_slices;
329 static bool two_channels; /* Both PMI channels in one slice enabled */
331 static u8 sym_chan_mask;
332 static u8 asym_chan_mask;
335 static int slice_selector = -1;
336 static int chan_selector = -1;
337 static u64 slice_hash_mask;
338 static u64 chan_hash_mask;
/*
 * Record an explicit [base, limit] address region and log it.
 * NOTE(review): the assignments to rp->base / rp->limit / rp->enabled
 * are not visible in this view of the file -- only the debug print
 * remains.
 */
340 static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
345 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
/*
 * Build a region from the MOT base/mask register pair. The mask must
 * be a contiguous run of high bits (power-of-two sized region) and
 * the base must be aligned to it; violations are reported as
 * firmware bugs.
 * NOTE(review): the zero-mask check, alignment check and the
 * rp->base/rp->enabled assignments are not visible in this view.
 */
348 static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
351 pr_info(FW_BUG "MOT mask cannot be zero\n");
/* Mask must be contiguous from its lowest set bit to the top phys bit */
354 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
355 pr_info(FW_BUG "MOT mask not power of two\n");
359 pr_info(FW_BUG "MOT region base/mask alignment error\n");
/* Limit = base with all bits below the mask set, clipped to max phys */
363 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
365 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
368 static bool in_region(struct region *rp, u64 addr)
373 return rp->base <= addr && addr <= rp->limit;
/*
 * Build the 4-bit symmetric channel bitmap: bits 0-1 are slice 0's
 * PMI channels, bits 2-3 slice 1's.
 * NOTE(review): the mask initializer, the body of the final
 * conditional (masking off odd channels) and the return statement
 * are not visible in this view of the file.
 */
376 static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
380 if (!p->slice_0_mem_disabled)
381 mask |= p->sym_slice0_channel_enabled;
383 if (!p->slice_1_disabled)
384 mask |= p->sym_slice1_channel_enabled << 2;
/* Channel 1 off (or dual-data mode) leaves only the even channels */
386 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
/*
 * Build the 4-bit asymmetric channel bitmap from the per-slice asym
 * region selects plus the optional 2-way interleave mode, then clear
 * channels that the hash config has disabled.
 * NOTE(review): the mask initializer, the statements clearing bits
 * for disabled slices/channels and the return are not visible in
 * this view of the file.
 */
392 static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
393 struct b_cr_asym_mem_region0_mchbar *as0,
394 struct b_cr_asym_mem_region1_mchbar *as1,
395 struct b_cr_asym_2way_mem_region_mchbar *as2way)
/* Channel pair used by each of the four 2-way interleave modes */
397 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
400 if (as2way->asym_2way_interleave_enable)
401 mask = intlv[as2way->asym_2way_intlv_mode];
/* Slice 0 channels occupy bits 0-1, slice 1 channels bits 2-3 */
402 if (as0->slice0_asym_enable)
403 mask |= (1 << as0->slice0_asym_channel_select);
404 if (as1->slice1_asym_enable)
405 mask |= (4 << as1->slice1_asym_channel_select);
406 if (p->slice_0_mem_disabled)
408 if (p->slice_1_disabled)
410 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
/*
 * One-time snapshot of the hardware configuration registers, filled
 * in by get_registers() / the platform get_registers() hooks and read
 * by the address decoders afterwards.
 */
416 static struct b_cr_tolud_pci tolud;
417 static struct b_cr_touud_lo_pci touud_lo;
418 static struct b_cr_touud_hi_pci touud_hi;
419 static struct b_cr_asym_mem_region0_mchbar asym0;
420 static struct b_cr_asym_mem_region1_mchbar asym1;
421 static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
422 static struct b_cr_mot_out_base_mchbar mot_base;
423 static struct b_cr_mot_out_mask_mchbar mot_mask;
424 static struct b_cr_slice_channel_hash chash;
426 /* Apollo Lake dunit */
428 * Validated on board with just two DIMMs in the [0] and [2] positions
429 * in this array. Other port number matches documentation, but caution
/* Sideband port number of each Apollo Lake channel's dunit */
432 static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
433 static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
435 /* Denverton dunit */
/* Sideband port numbers of the two Denverton DIMM units */
436 static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
437 static struct d_cr_dsch dsch;
438 static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
439 static struct d_cr_drp drp[DNV_NUM_CHANNELS];
/* dmap..dmap5 hold the per-channel PMI-address-bit -> DRAM-bit maps */
440 static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
441 static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
442 static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
443 static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
444 static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
445 static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
447 static void apl_mk_region(char *name, struct region *rp, void *asym)
449 struct b_cr_asym_mem_region0_mchbar *a = asym;
452 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
453 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
454 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
457 static void dnv_mk_region(char *name, struct region *rp, void *asym)
459 struct b_cr_asym_mem_region_denverton *a = asym;
462 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
463 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
464 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
/*
 * Read the Apollo Lake dunit registers. RD_REGP() fails for
 * unpopulated or absent DIMM slots, so the function succeeds as long
 * as at least one channel's drp0 register could be read.
 * NOTE(review): the declarations/initialization of the loop index
 * and return value, and the final return, are not visible in this
 * view of the file.
 */
467 static int apl_get_registers(void)
472 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
476 * RD_REGP() will fail for unpopulated or non-existent
477 * DIMM slots. Return success if we find at least one DIMM.
479 for (i = 0; i < APL_NUM_CHANNELS; i++)
480 if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
/*
 * Read the Denverton dunit registers: the shared dsch register plus
 * the full per-channel ecc/drp/dmap* set. A failed read of any
 * per-channel register aborts the whole sequence.
 * NOTE(review): the loop index declaration, error-return statements
 * and final return are not visible in this view of the file.
 */
486 static int dnv_get_registers(void)
490 if (RD_REG(&dsch, d_cr_dsch))
493 for (i = 0; i < DNV_NUM_CHANNELS; i++)
494 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
495 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
496 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
497 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
498 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
499 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
500 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
501 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
508 * Read all the h/w config registers once here (they don't
509 * change at run time. Figure out which address ranges have
510 * which interleave characteristics.
512 static int get_registers(void)
/* Interleave selector bit chosen by each chash.interleave_mode value */
514 const int intlv[] = { 10, 11, 12, 12 };
516 if (RD_REG(&tolud, b_cr_tolud_pci) ||
517 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
518 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
519 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
520 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
521 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
522 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
523 RD_REG(&chash, b_cr_slice_channel_hash))
526 if (ops->get_registers())
/* Denverton has one PMI channel per slice; normalize the fields */
529 if (ops->type == DNV) {
530 /* PMI channel idx (always 0) for asymmetric region */
531 asym0.slice0_asym_channel_select = 0;
532 asym1.slice1_asym_channel_select = 0;
533 /* PMI channel bitmap (always 1) for symmetric region */
534 chash.sym_slice0_channel_enabled = 0x1;
535 chash.sym_slice1_channel_enabled = 0x1;
/* Build the asymmetric / 2-way / MOT region descriptors */
538 if (asym0.slice0_asym_enable)
539 ops->mk_region("as0", &as0, &asym0);
541 if (asym1.slice1_asym_enable)
542 ops->mk_region("as1", &as1, &asym1);
544 if (asym_2way.asym_2way_interleave_enable) {
545 mk_region("as2way", &as2,
546 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
547 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
548 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
551 if (mot_base.imr_en) {
552 mk_region_mask("mot", &mot,
553 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
554 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
/* Top of low/high usable DRAM from TOLUD / TOUUD */
557 top_lm = U64_LSHIFT(tolud.tolud, 20);
558 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
560 two_slices = !chash.slice_1_disabled &&
561 !chash.slice_0_mem_disabled &&
562 (chash.sym_slice0_channel_enabled != 0) &&
563 (chash.sym_slice1_channel_enabled != 0);
564 two_channels = !chash.ch_1_disabled &&
565 !chash.enable_pmi_dual_data_mode &&
566 ((chash.sym_slice0_channel_enabled == 3) ||
567 (chash.sym_slice1_channel_enabled == 3));
569 sym_chan_mask = gen_sym_mask(&chash);
570 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
571 chan_mask = sym_chan_mask | asym_chan_mask;
/* Pick which address bit selects the slice and/or the channel */
573 if (two_slices && !two_channels) {
577 slice_selector = intlv[chash.interleave_mode];
578 } else if (!two_slices && two_channels) {
582 chan_selector = intlv[chash.interleave_mode];
583 } else if (two_slices && two_channels) {
584 if (chash.hvm_mode) {
588 slice_selector = intlv[chash.interleave_mode];
589 chan_selector = intlv[chash.interleave_mode] + 1;
/* Hash masks fold in the selector bit itself */
595 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
597 slice_hash_mask |= BIT_ULL(slice_selector);
602 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
604 chan_hash_mask |= BIT_ULL(chan_selector);
610 /* Get a contiguous memory address (remove the MMIO gap) */
611 static u64 remove_mmio_gap(u64 sys)
613 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
616 /* Squeeze out one address bit, shift upper part down to fill gap */
617 static void remove_addr_bit(u64 *addr, int bitidx)
624 mask = (1ull << bitidx) - 1;
625 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
628 /* XOR all the bits from addr specified in mask */
629 static int hash_by_mask(u64 addr, u64 mask)
631 u64 result = addr & mask;
633 result = (result >> 32) ^ result;
634 result = (result >> 16) ^ result;
635 result = (result >> 8) ^ result;
636 result = (result >> 4) ^ result;
637 result = (result >> 2) ^ result;
638 result = (result >> 1) ^ result;
640 return (int)result & 1;
644 * First stage decode. Take the system address and figure out which
645 * second stage will deal with it based on interleave modes.
647 static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
649 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
/* MOT channel interleave bit depends on whether both slices are live */
650 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
651 MOT_CHAN_INTLV_BIT_1SLC_2CH;
652 int slice_intlv_bit_rm = SELECTOR_DISABLED;
653 int chan_intlv_bit_rm = SELECTOR_DISABLED;
654 /* Determine if address is in the MOT region. */
655 bool mot_hit = in_region(&mot, addr);
656 /* Calculate the number of symmetric regions enabled. */
657 int sym_channels = hweight8(sym_chan_mask);
660 * The amount we need to shift the asym base can be determined by the
661 * number of enabled symmetric channels.
662 * NOTE: This can only work because symmetric memory is not supposed
663 * to do a 3-way interleave.
665 int sym_chan_shift = sym_channels >> 1;
667 /* Give up if address is out of range, or in MMIO gap */
668 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
669 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
670 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
674 /* Get a contiguous memory address (remove the MMIO gap) */
675 contig_addr = remove_mmio_gap(addr);
/* Asymmetric region for slice 0: fixed PMI channel, rebased offset */
677 if (in_region(&as0, addr)) {
678 *pmiidx = asym0.slice0_asym_channel_select;
680 contig_base = remove_mmio_gap(as0.base);
681 contig_offset = contig_addr - contig_base;
682 contig_base_adj = (contig_base >> sym_chan_shift) *
683 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
684 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
/* Asymmetric region for slice 1: PMI channels 2/3 */
685 } else if (in_region(&as1, addr)) {
686 *pmiidx = 2u + asym1.slice1_asym_channel_select;
688 contig_base = remove_mmio_gap(as1.base);
689 contig_offset = contig_addr - contig_base;
690 contig_base_adj = (contig_base >> sym_chan_shift) *
691 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
692 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
/* 2-way interleaved asymmetric region (mode 0x3 only) */
693 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
696 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
697 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
698 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
699 hash_by_mask(contig_addr, chan_hash_mask);
700 *pmiidx |= (u32)channel1;
702 contig_base = remove_mmio_gap(as2.base);
703 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
704 contig_offset = contig_addr - contig_base;
705 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
706 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
708 /* Otherwise we're in normal, boring symmetric mode. */
/* MOT addresses use a fixed slice bit; others hash the address */
715 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
716 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
718 slice_intlv_bit_rm = slice_selector;
719 slice1 = hash_by_mask(addr, slice_hash_mask);
722 *pmiidx = (u32)slice1 << 1;
/* Channel selection mirrors the slice logic: MOT bit vs. hash */
728 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
729 MOT_CHAN_INTLV_BIT_1SLC_2CH;
732 chan_intlv_bit_rm = mot_intlv_bit;
733 channel1 = (addr >> mot_intlv_bit) & 1;
735 chan_intlv_bit_rm = chan_selector;
736 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
739 *pmiidx |= (u32)channel1;
743 /* Remove the chan_selector bit first */
744 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
745 /* Remove the slice bit (we remove it second because it must be lower */
746 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
747 *pmiaddr = contig_addr;
752 /* Translate PMI address to memory (rank, row, bank, column) */
753 #define C(n) (0x10 | (n)) /* column */
754 #define B(n) (0x20 | (n)) /* bank */
755 #define R(n) (0x40 | (n)) /* row */
756 #define RS (0x80) /* rank */
/*
 * dimms[]: one entry per supported (page map, density, device width)
 * combination. bits[i] names the DRAM coordinate (C/B/R/RS above)
 * carried by PMI address bit i; apl_pmi2mem() walks this table.
 * NOTE(review): intermediate struct members and the per-entry braces
 * are not visible in this view of the file.
 */
772 static struct dimm_geometry {
777 u16 bits[PMI_ADDRESS_WIDTH];
780 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
781 .rowbits = 15, .colbits = 10,
783 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
784 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
785 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
790 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
791 .rowbits = 16, .colbits = 10,
793 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
794 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
795 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
800 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
801 .rowbits = 16, .colbits = 10,
803 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
804 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
805 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
810 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
811 .rowbits = 16, .colbits = 11,
813 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
814 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
815 R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
/* 2KB page mapping pulls C(7) below the bank bits */
820 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
821 .rowbits = 15, .colbits = 10,
823 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
824 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
825 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
830 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
831 .rowbits = 16, .colbits = 10,
833 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
834 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
835 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
840 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
841 .rowbits = 16, .colbits = 10,
843 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
844 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
845 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
850 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
851 .rowbits = 16, .colbits = 11,
853 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
854 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
855 R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
/* 4KB page mapping pulls C(7) and C(8) below the bank bits */
860 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
861 .rowbits = 15, .colbits = 10,
863 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
864 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
865 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
870 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
871 .rowbits = 16, .colbits = 10,
873 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
874 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
875 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
880 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
881 .rowbits = 16, .colbits = 10,
883 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
884 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
885 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
890 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
891 .rowbits = 16, .colbits = 11,
893 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
894 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
895 R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
/*
 * XOR-fold the PMI address bits that hash into bank bit @idx; @shft
 * compensates for the page-size address decode (AMAP_* value).
 * NOTE(review): the bhash declaration/initializer and the
 * switch/case scaffolding selecting one of the three expressions
 * below (per bank bit) are not visible in this view of the file.
 */
901 static int bank_hash(u64 pmiaddr, int idx, int shft)
/* Bank bit 0 hash */
907 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
/* Bank bit 1 hash, plus a fixed contribution from address bit 22 */
910 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
911 bhash ^= ((pmiaddr >> 22) & 1) << 1;
/* Bank bit 2 hash */
914 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
921 static int rank_hash(u64 pmiaddr)
923 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
926 /* Second stage decode. Compute rank, bank, row & column. */
/*
 * Walk the dimm_geometry bits[] map for this channel's DIMM and
 * scatter each PMI address bit into its rank/bank/row/column slot,
 * applying the bank and rank hashes afterwards.
 * NOTE(review): the switch dispatching on "type", the skiprs
 * increment, the daddr assignments and the return paths are not
 * visible in this view of the file.
 */
927 static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
928 struct dram_addr *daddr, char *msg)
930 struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
931 struct pnd2_pvt *pvt = mci->pvt_info;
/* Geometry index was matched to this channel in apl_get_dimm_config() */
932 int g = pvt->dimm_geom[pmiidx];
933 struct dimm_geometry *d = &dimms[g];
934 int column = 0, bank = 0, row = 0, rank = 0;
935 int i, idx, type, skiprs = 0;
937 for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
938 int bit = (pmiaddr >> i) & 1;
940 if (i + skiprs >= PMI_ADDRESS_WIDTH) {
941 snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
/* High nibble of bits[] encodes the coordinate type, low the index */
945 type = d->bits[i + skiprs] & ~0xf;
946 idx = d->bits[i + skiprs] & 0xf;
949 * On single rank DIMMs ignore the rank select bit
950 * and shift remainder of "bits[]" down one place.
952 if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
954 type = d->bits[i + skiprs] & ~0xf;
955 idx = d->bits[i + skiprs] & 0xf;
960 column |= (bit << idx);
963 bank |= (bit << idx);
/* Each bank bit may be XOR-hashed with other address bits */
965 bank ^= bank_hash(pmiaddr, idx, d->addrdec);
973 rank ^= rank_hash(pmiaddr);
977 snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
994 /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
995 #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
/*
 * Denverton second-stage decode: assemble rank, DIMM, bank, row and
 * column directly from the per-channel dmap* register bit positions
 * (each dmap field gives the PMI address bit feeding that DRAM bit).
 * NOTE(review): several guarding conditionals (e.g. rs1/bg1/ca11
 * validity checks) and the return are not visible in this view.
 */
997 static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
998 struct dram_addr *daddr, char *msg)
1001 daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
1003 daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
1006 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
1007 * flip them if DIMM1 is larger than DIMM0.
1009 daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
/* Bank address and bank group bits */
1011 daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
1012 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
1013 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
1015 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
/* Optional bank XOR hashing; the bit sources differ by bxor mode */
1016 if (dmap1[pmiidx].bxor) {
1018 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
1019 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
1020 if (dsch.chan_width == 0)
1021 /* 64/72 bit dram channel width */
1022 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1024 /* 32/40 bit dram channel width */
1025 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1026 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
1028 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
1029 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
1030 if (dsch.chan_width == 0)
1031 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1033 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
/* Row bits 0-13 always come from dmap2/dmap3/dmap4 */
1037 daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
1038 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
1039 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
1040 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
1041 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
1042 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
1043 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
1044 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
1045 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
1046 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
1047 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1048 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1049 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1050 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
/* Field value 31 marks row bits 14-17 as unused on smaller DIMMs */
1051 if (dmap4[pmiidx].row14 != 31)
1052 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1053 if (dmap4[pmiidx].row15 != 31)
1054 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1055 if (dmap4[pmiidx].row16 != 31)
1056 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1057 if (dmap4[pmiidx].row17 != 31)
1058 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1060 daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1061 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1062 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1063 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1064 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1065 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1066 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
/* Column bit 11 only exists for DDR3 and when the map marks it valid */
1067 if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1068 daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1073 static int check_channel(int ch)
1075 if (drp0[ch].dramtype != 0) {
1076 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1078 } else if (drp0[ch].eccen == 0) {
1079 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1085 static int apl_check_ecc_active(void)
1089 /* Check dramtype and ECC mode for each present DIMM */
1090 for (i = 0; i < APL_NUM_CHANNELS; i++)
1091 if (chan_mask & BIT(i))
1092 ret += check_channel(i);
1093 return ret ? -EINVAL : 0;
1096 #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1098 static int check_unit(int ch)
1100 struct d_cr_drp *d = &drp[ch];
1102 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1103 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1109 static int dnv_check_ecc_active(void)
1113 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1114 ret += check_unit(i);
1115 return ret ? -EINVAL : 0;
/*
 * Full address translation: system address -> PMI address (stage 1,
 * sys2pmi) -> DRAM coordinates (stage 2, platform pmi2mem hook).
 * On failure @msg holds a human-readable reason.
 * NOTE(review): local declarations (ret, pmiaddr, pmiidx) and the
 * error-return checks after each stage are not visible in this view.
 */
1118 static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1119 struct dram_addr *daddr, char *msg)
1125 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1129 pmiaddr >>= ops->pmiaddr_shift;
1130 /* pmi channel idx to dimm channel idx */
1131 pmiidx >>= ops->pmiidx_shift;
1132 daddr->chan = pmiidx;
1134 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1138 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1139 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
/*
 * Decode one machine-check record: classify severity, describe the
 * operation type from the MCA error code, translate the address to
 * DRAM coordinates and report via edac_mc_handle_error(). Records
 * that cannot be decoded fall through to the generic report at the
 * bottom.
 * NOTE(review): the "rc" declaration, several case labels/gotos and
 * the address-check early exits are not visible in this view.
 */
1144 static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1145 struct dram_addr *daddr)
1147 enum hw_event_mc_err_type tp_event;
1148 char *optype, msg[PND2_MSG_SIZE];
1149 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1150 bool overflow = m->status & MCI_STATUS_OVER;
1151 bool uc_err = m->status & MCI_STATUS_UC;
1152 bool recov = m->status & MCI_STATUS_S;
1153 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1154 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1155 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1156 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
/* Uncorrected + no valid RIP means we cannot safely continue */
1159 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1160 HW_EVENT_ERR_CORRECTED;
1163 * According with Table 15-9 of the Intel Architecture spec vol 3A,
1164 * memory errors should fit in this mask:
1165 * 000f 0000 1mmm cccc (binary)
1167 * f = Correction Report Filtering Bit. If 1, subsequent errors
1171 * If the mask doesn't match, report an error to the parsing logic
1173 if (!((errcode & 0xef80) == 0x80)) {
1174 optype = "Can't parse: it is not a mem";
1176 switch (optypenum) {
1178 optype = "generic undef request error";
1181 optype = "memory read error";
1184 optype = "memory write error";
1187 optype = "addr/cmd error";
1190 optype = "memory scrubbing error";
1193 optype = "reserved";
1198 /* Only decode errors with an valid address (ADDRV) */
1199 if (!(m->status & MCI_STATUS_ADDRV))
1202 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1206 snprintf(msg, sizeof(msg),
1207 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1208 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1209 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1211 edac_dbg(0, "%s\n", msg);
1213 /* Call the helper to output message */
1214 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1215 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
/* Fallback report when the record could not be fully decoded */
1220 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
/*
 * Apollo Lake get_dimm_config hook: for each enabled channel, match
 * its drp0 register against the dimms[] geometry table, remember the
 * geometry index for later decode and fill in the EDAC dimm_info
 * (size, type, label).
 * NOTE(review): loop-local declarations (g, capacity), the continue
 * statements and the "d = &drp0[i]" assignment are not visible in
 * this view of the file.
 */
1223 static void apl_get_dimm_config(struct mem_ctl_info *mci)
1225 struct pnd2_pvt *pvt = mci->pvt_info;
1226 struct dimm_info *dimm;
1227 struct d_cr_drp0 *d;
1231 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1232 if (!(chan_mask & BIT(i)))
1235 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1237 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
/* Match page map / density / width against the geometry table */
1242 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1243 if (dimms[g].addrdec == d->addrdec &&
1244 dimms[g].dden == d->dden &&
1245 dimms[g].dwid == d->dwid)
1248 if (g == ARRAY_SIZE(dimms)) {
1249 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
/* Save the geometry index; apl_pmi2mem() uses it for decode */
1253 pvt->dimm_geom[i] = g;
/* capacity in units of 8 bytes: ranks * 8 banks * rows * cols */
1254 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1255 (1ul << dimms[g].colbits);
1256 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1257 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1259 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1260 dimm->mtype = MEM_DDR3;
1261 dimm->edac_mode = EDAC_SECDED;
1262 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1266 static const int dnv_dtypes[] = {
1267 DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1270 static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1272 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1273 struct dimm_info *dimm;
1286 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1287 if (dmap4[i].row14 == 31)
1289 else if (dmap4[i].row15 == 31)
1291 else if (dmap4[i].row16 == 31)
1293 else if (dmap4[i].row17 == 31)
1298 if (memtype == MEM_DDR3) {
1299 if (dmap1[i].ca11 != 0x3f)
1306 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1307 ranks_of_dimm[0] = d->rken0 + d->rken1;
1308 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1309 ranks_of_dimm[1] = d->rken2 + d->rken3;
1311 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1312 if (!ranks_of_dimm[j])
1315 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1317 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1321 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1322 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1323 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1325 dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1326 dimm->mtype = memtype;
1327 dimm->edac_mode = EDAC_SECDED;
1328 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1333 static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1335 struct edac_mc_layer layers[2];
1336 struct mem_ctl_info *mci;
1337 struct pnd2_pvt *pvt;
1340 rc = ops->check_ecc();
1344 /* Allocate a new MC control structure */
1345 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1346 layers[0].size = ops->channels;
1347 layers[0].is_virt_csrow = false;
1348 layers[1].type = EDAC_MC_LAYER_SLOT;
1349 layers[1].size = ops->dimms_per_channel;
1350 layers[1].is_virt_csrow = true;
1351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1355 pvt = mci->pvt_info;
1356 memset(pvt, 0, sizeof(*pvt));
1358 mci->mod_name = "pnd2_edac.c";
1359 mci->dev_name = ops->name;
1360 mci->ctl_name = "Pondicherry2";
1362 /* Get dimm basic config and the memory layout */
1363 ops->get_dimm_config(mci);
1365 if (edac_mc_add_mc(mci)) {
1366 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1376 static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1378 if (unlikely(!mci || !mci->pvt_info)) {
1379 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1383 /* Remove MC sysfs nodes */
1384 edac_mc_del_mc(NULL);
1385 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1390 * Callback function registered with core kernel mce code.
1391 * Called once for each logged error.
1393 static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1395 struct mce *mce = (struct mce *)data;
1396 struct mem_ctl_info *mci;
1397 struct dram_addr daddr;
1400 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
1408 * Just let mcelog handle it if the error is
1409 * outside the memory controller. A memory error
1410 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
1411 * bit 12 has an special meaning.
1413 if ((mce->status & 0xefff) >> 7 != 1)
1416 if (mce->mcgstatus & MCG_STATUS_MCIP)
1421 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1422 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1423 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1424 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1425 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1426 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1427 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1428 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1430 pnd2_mce_output_error(mci, mce, &daddr);
1432 /* Advice mcelog that the error were handled */
1436 static struct notifier_block pnd2_mce_dec = {
1437 .notifier_call = pnd2_mce_check_error,
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
1445 static u64 pnd2_fake_addr;
1446 #define PND2_BLOB_SIZE 1024
1447 static char pnd2_result[PND2_BLOB_SIZE];
1448 static struct dentry *pnd2_test;
1449 static struct debugfs_blob_wrapper pnd2_blob = {
1450 .data = pnd2_result,
1454 static int debugfs_u64_set(void *data, u64 val)
1456 struct dram_addr daddr;
1461 /* ADDRV + MemRd + Unknown channel */
1462 m.status = MCI_STATUS_ADDRV + 0x9f;
1464 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1465 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1466 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1467 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1468 pnd2_blob.size = strlen(pnd2_blob.data);
/* Write-only u64 attribute backing the "pnd2_debug_addr" debugfs file */
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1474 static void setup_pnd2_debug(void)
1476 pnd2_test = edac_debugfs_create_dir("pnd2_test");
1477 edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1478 &pnd2_fake_addr, &fops_u64_wo);
1479 debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1482 static void teardown_pnd2_debug(void)
1484 debugfs_remove_recursive(pnd2_test);
1487 static void setup_pnd2_debug(void) {}
1488 static void teardown_pnd2_debug(void) {}
1489 #endif /* CONFIG_EDAC_DEBUG */
1492 static int pnd2_probe(void)
1497 rc = get_registers();
1501 return pnd2_register_mci(&pnd2_mci);
1504 static void pnd2_remove(void)
1507 pnd2_unregister_mci(pnd2_mci);
1510 static struct dunit_ops apl_ops = {
1513 .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
1515 .channels = APL_NUM_CHANNELS,
1516 .dimms_per_channel = 1,
1517 .rd_reg = apl_rd_reg,
1518 .get_registers = apl_get_registers,
1519 .check_ecc = apl_check_ecc_active,
1520 .mk_region = apl_mk_region,
1521 .get_dimm_config = apl_get_dimm_config,
1522 .pmi2mem = apl_pmi2mem,
1525 static struct dunit_ops dnv_ops = {
1530 .channels = DNV_NUM_CHANNELS,
1531 .dimms_per_channel = 2,
1532 .rd_reg = dnv_rd_reg,
1533 .get_registers = dnv_get_registers,
1534 .check_ecc = dnv_check_ecc_active,
1535 .mk_region = dnv_mk_region,
1536 .get_dimm_config = dnv_get_dimm_config,
1537 .pmi2mem = dnv_pmi2mem,
1540 static const struct x86_cpu_id pnd2_cpuids[] = {
1541 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1542 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1545 MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1547 static int __init pnd2_init(void)
1549 const struct x86_cpu_id *id;
1554 id = x86_match_cpu(pnd2_cpuids);
1558 ops = (struct dunit_ops *)id->driver_data;
1560 if (ops->type == APL) {
1561 p2sb_bus = pci_find_bus(0, 0);
1566 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1571 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1578 mce_register_decode_chain(&pnd2_mce_dec);
1584 static void __exit pnd2_exit(void)
1587 teardown_pnd2_debug();
1588 mce_unregister_decode_chain(&pnd2_mce_dec);
module_init(pnd2_init);
module_exit(pnd2_exit);

/* Polling vs NMI reporting mode, consumed by the EDAC core (see opstate_init) */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");