// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct amd64_family_type *fam_type;

static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
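/*
 * For reference, callers below go through thin convenience macros rather than
 * passing __func__ by hand. A minimal sketch of those wrappers, assuming the
 * definitions in amd64_edac.h look roughly like this (illustrative, not
 * authoritative):
 *
 *	#define amd64_read_pci_cfg(pdev, offset, val)	\
 *		__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
 *
 *	#define amd64_write_pci_cfg(pdev, offset, val)	\
 *		__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
 */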
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
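/*
 * Worked example (illustrative): with min_rate 0 (K8), a request of
 * new_bw = 1600000000 (1.6 GB/s) stops the scan at the first table entry and
 * programs scrubval 0x01; on families with min_rate 0x5, entries whose
 * scrubval is below 0x5 are skipped before the bandwidth comparison.
 */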
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		}
	} else if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
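/*
 * Example of the truncation above (illustrative): a sign-extended SysAddr of
 * 0xffffff8000000000 becomes addr = 0x0000008000000000 once bits 63-40 are
 * discarded, and that 40-bit value is what gets compared against the node's
 * base/limit pair.
 */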
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow];
		base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift = 4;

	/*
	 * F16h and F15h, models 30h and later, need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift = 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);

	*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			      u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
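/*
 * For orientation: the DHAR accessors used above expand the register's 8-bit
 * fields back to full 32-bit values. A sketch of the idea, assuming helpers
 * along these lines (the authoritative macros live in amd64_edac.h):
 *
 *	dhar_base(pvt)      ~ dhar & GENMASK(31, 24)
 *	k8_dhar_offset(pvt) ~ (dhar & GENMASK(15, 8)) << 16
 */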
#ifdef CONFIG_EDAC_DEBUG
#define EDAC_DCT_ATTR_SHOW(reg)						\
static ssize_t reg##_show(struct device *dev,				\
			  struct device_attribute *mattr, char *data)	\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct amd64_pvt *pvt = mci->pvt_info;				\
									\
	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
}

EDAC_DCT_ATTR_SHOW(dhar);
EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);

static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
			      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	u64 hole_base = 0;
	u64 hole_offset = 0;
	u64 hole_size = 0;

	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);

	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
						 hole_size);
}

/*
 * update NUM_DBG_ATTRS in case you add new members
 */
static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
static DEVICE_ATTR_RO(dram_hole);

static struct attribute *dbg_attrs[] = {
	&dev_attr_dhar.attr,
	&dev_attr_dbam.attr,
	&dev_attr_topmem.attr,
	&dev_attr_topmem2.attr,
	&dev_attr_dram_hole.attr,
	NULL
};

static const struct attribute_group dbg_group = {
	.attrs = dbg_attrs,
};
static ssize_t inject_section_show(struct device *dev,
				   struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.section);
}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t inject_section_store(struct device *dev,
				    struct device_attribute *mattr,
				    const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 3) {
		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.section = (u32) value;
	return count;
}
static ssize_t inject_word_show(struct device *dev,
				struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.word);
}

/*
 * store error injection word value which refers to one of 9 16-bit words of the
 * 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t inject_word_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 8) {
		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.word = (u32) value;
	return count;
}
static ssize_t inject_ecc_vector_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}

/*
 * store 16 bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t inject_ecc_vector_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 16, &value);
	if (ret < 0)
		return ret;

	if (value & 0xFFFF0000) {
		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.bit_map = (u32) value;
	return count;
}
/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t inject_read_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);

	/* Issue 'word' and 'bit' along with the READ request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t inject_write_store(struct device *dev,
				  struct device_attribute *mattr,
				  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 section, word_bits, tmp;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);

	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
			"so that you can get the error report faster.\n");

	on_each_cpu(disable_caches, NULL, 1);

	/* Issue 'word' and 'bit' along with the WRITE request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

retry:
	/* wait until injection happens */
	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
		cpu_relax();
		goto retry;
	}

	on_each_cpu(enable_caches, NULL, 1);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
/*
 * update NUM_INJ_ATTRS in case you add new members
 */
static DEVICE_ATTR_RW(inject_section);
static DEVICE_ATTR_RW(inject_word);
static DEVICE_ATTR_RW(inject_ecc_vector);
static DEVICE_ATTR_WO(inject_write);
static DEVICE_ATTR_WO(inject_read);
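/*
 * Typical use of the injection files from userspace (illustrative; the sysfs
 * path assumes memory controller 0):
 *
 *	echo 3      > /sys/devices/system/edac/mc/mc0/inject_section
 *	echo 5      > /sys/devices/system/edac/mc/mc0/inject_word
 *	echo 0x0001 > /sys/devices/system/edac/mc/mc0/inject_ecc_vector
 *	echo 1      > /sys/devices/system/edac/mc/mc0/inject_write
 */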
static struct attribute *inj_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_word.attr,
	&dev_attr_inject_ecc_vector.attr,
	&dev_attr_inject_write.attr,
	&dev_attr_inject_read.attr,
	NULL
};

static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	/* Families which have that injection hw */
	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
		return attr->mode;

	return 0;
}

static const struct attribute_group inj_group = {
	.attrs = inj_attrs,
	.is_visible = inj_is_visible,
};
#endif /* CONFIG_EDAC_DEBUG */
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
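/*
 * e.g. intlv_en == 0x3 (SysAddr interleaved across four nodes) makes
 * num_node_interleave_bits() return 2.
 */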
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
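/*
 * e.g. with 4K pages (PAGE_SHIFT == 12), error_address 0x12345678 maps to
 * err->page == 0x12345 and err->offset == 0x678.
 */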
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
/* Protect the PCI config register pairs used for DF indirect access. */
static DEFINE_MUTEX(df_indirect_mutex);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers, but so far we only need the LO register.
 *
 * Use Instance Id 0xFF to indicate a broadcast read.
 */
#define DF_BROADCAST	0xFF
static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_nb_num())
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = (instance_id == DF_BROADCAST) ? 0 : 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&df_indirect_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&df_indirect_mutex);

out:
	return err;
}

static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	return __df_indirect_read(node, func, reg, instance_id, lo);
}

static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
{
	return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
}
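/*
 * FICAA encoding example (illustrative): a targeted read of D18F0x110 on
 * instance 0 builds
 *
 *	ficaa = 1			targeted (not broadcast)
 *	      | (0x110 & 0x3FC)		register offset, dword aligned
 *	      | (0x0 & 0x7) << 11	function 0
 *	      | 0x0 << 16		instance id 0
 *	      = 0x111
 */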
static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	struct addr_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));

	/* Start from the normalized address */
	ctx.ret_addr = norm_addr;

	ctx.nid = nid;
	ctx.inst_id = umc;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (ctx.tmp & BIT(0)) {
		u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ctx.ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(ctx.tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, ctx.tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = ctx.tmp & BIT(1);
	intlv_num_chan	  = (ctx.tmp >> 4) & 0xF;
	intlv_addr_sel	  = (ctx.tmp >> 8) & 0x7;
	dram_base_addr	  = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
		goto out_err;

	intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
	intlv_num_dies	  = (ctx.tmp >> 10) & 0x3;
	dram_limit_addr	  = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}
	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
			goto out_err;

		cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
		die_id_bit = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	= (1 << die_id_bit) - 1;
			cs_id	= cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (ctx.tmp >> 24) & 0xF;
			die_id_mask  = (ctx.tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (ctx.tmp >> 28) & 0xF;
			socket_id_mask	= (ctx.tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
	}
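	/*
	 * Reassembly example (illustrative): with intlv_addr_bit == 8,
	 * num_intlv_bits == 2 and cs_id == 2, a post-interleave address of
	 * 0x1234 splits into Y == 0x34 (bits 7-0) and X == 0x12 (bits 63-8);
	 * the result is (0x12 << 10) | (2 << 8) | 0x34 == 0x4a34.
	 */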
	/* Add dram base address */
	ctx.ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
			goto out_err;

		dram_hole_base = ctx.tmp & GENMASK(31, 24);
		if (ctx.ret_addr >= dram_hole_base)
			ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ctx.ret_addr >> 12) ^
				(ctx.ret_addr >> 18) ^
				(ctx.ret_addr >> 21) ^
				(ctx.ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
			ctx.ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ctx.ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ctx.ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for_each_umc(i) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}

		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of the
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ?  "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ?  "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ?  "yes" : "no",
		 (dclr & BIT(13)) ?  "yes" : "no",
		 (dclr & BIT(14)) ?  "yes" : "no",
		 (dclr & BIT(15)) ?  "yes" : "no");
}
#define CS_EVEN_PRIMARY		BIT(0)
#define CS_ODD_PRIMARY		BIT(1)
#define CS_EVEN_SECONDARY	BIT(2)
#define CS_ODD_SECONDARY	BIT(3)
#define CS_3R_INTERLEAVE	BIT(4)

#define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)

static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
	u8 base, count = 0;
	int cs_mode = 0;

	if (csrow_enabled(2 * dimm, ctrl, pvt))
		cs_mode |= CS_EVEN_PRIMARY;

	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_PRIMARY;

	/* Asymmetric dual-rank DIMM support. */
	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_SECONDARY;

	/*
	 * 3 Rank interleaving support.
	 * There should be only three bases enabled and their two masks should
	 * be equal.
	 */
	for_each_chip_select(base, ctrl, pvt)
		count += csrow_enabled(base, ctrl, pvt);

	if (count == 3 &&
	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
		edac_dbg(1, "3R interleaving in use.\n");
		cs_mode |= CS_3R_INTERLEAVE;
	}

	return cs_mode;
}
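/*
 * e.g. a single dual-rank DIMM in slot 0 yields
 * cs_mode == (CS_EVEN_PRIMARY | CS_ODD_PRIMARY) for dimm == 0.
 */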
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1, cs_mode;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 2; dimm++) {
		cs0 = dimm * 2;
		cs1 = dimm * 2 + 1;

		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);

		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				cs0, size0,
				cs1, size1);
	}
}

static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
				   (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
					i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else if (pvt->fam >= 0x17) {
		int umc;

		for_each_umc(umc) {
			pvt->csels[umc].b_cnt = 4;
			pvt->csels[umc].m_cnt = 2;
		}
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
static void read_umc_base_mask(struct amd64_pvt *pvt)
{
	u32 umc_base_reg, umc_base_reg_sec;
	u32 umc_mask_reg, umc_mask_reg_sec;
	u32 base_reg, base_reg_sec;
	u32 mask_reg, mask_reg_sec;
	u32 *base, *base_sec;
	u32 *mask, *mask_sec;
	int cs, umc;

	for_each_umc(umc) {
		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;

		for_each_chip_select(cs, umc, pvt) {
			base = &pvt->csels[umc].csbases[cs];
			base_sec = &pvt->csels[umc].csbases_sec[cs];

			base_reg = umc_base_reg + (cs * 4);
			base_reg_sec = umc_base_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base, base_reg);

			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base_sec, base_reg_sec);
		}

		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;

		for_each_chip_select_mask(cs, umc, pvt) {
			mask = &pvt->csels[umc].csmasks[cs];
			mask_sec = &pvt->csels[umc].csmasks_sec[cs];

			mask_reg = umc_mask_reg + (cs * 4);
			mask_reg_sec = umc_mask_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask_sec, mask_reg_sec);
		}
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	if (pvt->umc)
		return read_umc_base_mask(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0 = DCSB0 + (cs * 4);
		int reg1 = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0 = DCSM0 + (cs * 4);
		int reg1 = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	if (pvt->umc) {
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;
	}

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60 needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = topology_die_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for_each_umc(i)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512MB for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 addr_mask_orig, addr_mask_deinterleaved;
	u32 msb, weight, num_zero_bits;
	int dimm, size = 0;

	/* No Chip Selects are enabled. */
	if (!cs_mode)
		return size;

	/* Requested size of an even CS but none are enabled. */
	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
		return size;

	/* Requested size of an odd CS but none are enabled. */
	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
		return size;

	/*
	 * There is one mask per DIMM, and two Chip Selects per DIMM.
	 *	CS0 and CS1 -> DIMM0
	 *	CS2 and CS3 -> DIMM1
	 */
	dimm = csrow_nr >> 1;

	/* Asymmetric dual-rank DIMM support. */
	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
	else
		addr_mask_orig = pvt->csels[umc].csmasks[dimm];

	/*
	 * The number of zero bits in the mask is equal to the number of bits
	 * in a full mask minus the number of bits in the current mask.
	 *
	 * The MSB is the number of bits in the full mask because BIT[0] is
	 * always 0.
	 *
	 * In the special 3 Rank interleaving case, a single bit is flipped
	 * without swapping with the most significant bit. This can be handled
	 * by keeping the MSB where it is and ignoring the single zero bit.
	 */
	msb = fls(addr_mask_orig) - 1;
	weight = hweight_long(addr_mask_orig);
	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);

	/* Take the number of zero bits off from the top of the mask. */
	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);

	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	size = (addr_mask_deinterleaved >> 2) + 1;

	/* Return size in MBs. */
	return size >> 10;
}
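/*
 * Worked example (illustrative): a non-interleaved addr_mask_orig of 0xfffe
 * gives msb == 15 and weight == 15, so no zero bits are removed and the mask
 * is used as-is; size == (0xfffe >> 2) + 1 == 0x4000 kB, i.e. 16 MB is
 * reported for that chip select. A mask interleaved over two channels carries
 * one zero bit below the MSB, which the deinterleaving strips, halving the
 * reported size.
 */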
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
2317 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2318 u64 sys_addr, bool hi_rng,
2319 u32 dct_sel_base_addr)
2322 u64 dram_base = get_dram_base(pvt, range);
2323 u64 hole_off = f10_dhar_offset(pvt);
2324 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
* if
* base address of high range is below 4Gb
* (bits [47:27] at [31:11]) &&
* DRAM address space on this DCT is hoisted above 4Gb &&
* sys_addr > 4Gb
*
* remove hole offset from sys_addr
* else
* remove high range offset from sys_addr
2338 if ((!(dct_sel_base_addr >> 16) ||
2339 dct_sel_base_addr < dhar_base(pvt)) &&
2341 (sys_addr >= BIT_64(32)))
2342 chan_off = hole_off;
2344 chan_off = dct_sel_base_off;
* if
* we have a valid hole &&
* sys_addr > 4Gb
*
* remove hole offset from sys_addr
* else
* remove dram base to normalize to DCT address
2355 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2356 chan_off = hole_off;
2358 chan_off = dram_base;
return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
* Checks if the csrow passed in is marked as SPARED; if so, returns the new
* spare row.
2368 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2372 if (online_spare_swap_done(pvt, dct) &&
2373 csrow == online_spare_bad_dramcs(pvt, dct)) {
2375 for_each_chip_select(tmp_cs, dct, pvt) {
2376 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2386 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2387 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2390 * -EINVAL: NOT FOUND
2391 * 0..csrow = Chip-Select Row
2393 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2395 struct mem_ctl_info *mci;
2396 struct amd64_pvt *pvt;
2397 u64 cs_base, cs_mask;
2398 int cs_found = -EINVAL;
2401 mci = edac_mc_find(nid);
2405 pvt = mci->pvt_info;
2407 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2409 for_each_chip_select(csrow, dct, pvt) {
2410 if (!csrow_enabled(csrow, dct, pvt))
2413 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2415 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2416 csrow, cs_base, cs_mask);
2420 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2421 (in_addr & cs_mask), (cs_base & cs_mask));
2423 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2424 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2428 cs_found = f10_process_possible_spare(pvt, dct, csrow);
2430 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
* See F2x10C. Non-interleaved graphics framebuffer memory below the 16G
* boundary is swapped with a region located at the bottom of memory so that
* the GPU can use the interleaved region and thus two channels.
2442 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2444 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2446 if (pvt->fam == 0x10) {
2447 /* only revC3 and revE have that feature */
2448 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2452 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2454 if (!(swap_reg & 0x1))
2457 swap_base = (swap_reg >> 3) & 0x7f;
2458 swap_limit = (swap_reg >> 11) & 0x7f;
2459 rgn_size = (swap_reg >> 20) & 0x7f;
2460 tmp_addr = sys_addr >> 27;
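/*
* The swap base/limit/size fields are in 128M (1 << 27) granularity;
* e.g. (illustrative) swap_base = 0x02 names the region starting
* at 256M.
*/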
2462 if (!(sys_addr >> 34) &&
2463 (((tmp_addr >= swap_base) &&
2464 (tmp_addr <= swap_limit)) ||
2465 (tmp_addr < rgn_size)))
2466 return sys_addr ^ (u64)swap_base << 27;
2471 /* For a given @dram_range, check if @sys_addr falls within it. */
2472 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2473 u64 sys_addr, int *chan_sel)
2475 int cs_found = -EINVAL;
2479 bool high_range = false;
2481 u8 node_id = dram_dst_node(pvt, range);
2482 u8 intlv_en = dram_intlv_en(pvt, range);
2483 u32 intlv_sel = dram_intlv_sel(pvt, range);
2485 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2486 range, sys_addr, get_dram_limit(pvt, range));
2488 if (dhar_valid(pvt) &&
2489 dhar_base(pvt) <= sys_addr &&
2490 sys_addr < BIT_64(32)) {
2491 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2496 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2499 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2501 dct_sel_base = dct_sel_baseaddr(pvt);
2504 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2505 * select between DCT0 and DCT1.
2507 if (dct_high_range_enabled(pvt) &&
2508 !dct_ganging_enabled(pvt) &&
2509 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2512 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2514 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2515 high_range, dct_sel_base);
2517 /* Remove node interleaving, see F1x120 */
2519 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2520 (chan_addr & 0xfff);
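/*
* E.g. (illustrative): with one interleave bit set (shift = 13),
* bit 12 is squeezed out: chan_addr 0x3abc123 becomes
* ((0x3abc123 >> 13) << 12) | 0x123 = 0x1d5e123.
*/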
2522 /* remove channel interleave */
2523 if (dct_interleave_enabled(pvt) &&
2524 !dct_high_range_enabled(pvt) &&
2525 !dct_ganging_enabled(pvt)) {
2527 if (dct_sel_interleave_addr(pvt) != 1) {
2528 if (dct_sel_interleave_addr(pvt) == 0x3)
2530 chan_addr = ((chan_addr >> 10) << 9) |
2531 (chan_addr & 0x1ff);
2533 /* A[6] or hash 6 */
2534 chan_addr = ((chan_addr >> 7) << 6) |
2538 chan_addr = ((chan_addr >> 13) << 12) |
2539 (chan_addr & 0xfff);
2542 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2544 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2547 *chan_sel = channel;
2552 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2553 u64 sys_addr, int *chan_sel)
2555 int cs_found = -EINVAL;
2556 int num_dcts_intlv = 0;
2557 u64 chan_addr, chan_offset;
2558 u64 dct_base, dct_limit;
2559 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2560 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2562 u64 dhar_offset = f10_dhar_offset(pvt);
2563 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2564 u8 node_id = dram_dst_node(pvt, range);
2565 u8 intlv_en = dram_intlv_en(pvt, range);
2567 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2568 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2570 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2571 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2573 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2574 range, sys_addr, get_dram_limit(pvt, range));
if ((get_dram_base(pvt, range) > sys_addr) ||
(get_dram_limit(pvt, range) < sys_addr))
2580 if (dhar_valid(pvt) &&
2581 dhar_base(pvt) <= sys_addr &&
2582 sys_addr < BIT_64(32)) {
2583 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2588 /* Verify sys_addr is within DCT Range. */
2589 dct_base = (u64) dct_sel_baseaddr(pvt);
2590 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2592 if (!(dct_cont_base_reg & BIT(0)) &&
2593 !(dct_base <= (sys_addr >> 27) &&
2594 dct_limit >= (sys_addr >> 27)))
/* Verify the number of DCTs that participate in channel interleaving. */
2598 num_dcts_intlv = (int) hweight8(intlv_en);
if ((num_dcts_intlv % 2) || (num_dcts_intlv > 4))
2603 if (pvt->model >= 0x60)
2604 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2606 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2607 num_dcts_intlv, dct_sel);
2609 /* Verify we stay within the MAX number of channels allowed */
2613 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2615 /* Get normalized DCT addr */
2616 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2617 chan_offset = dhar_offset;
2619 chan_offset = dct_base << 27;
2621 chan_addr = sys_addr - chan_offset;
2623 /* remove channel interleave */
2624 if (num_dcts_intlv == 2) {
2625 if (intlv_addr == 0x4)
2626 chan_addr = ((chan_addr >> 9) << 8) |
2628 else if (intlv_addr == 0x5)
2629 chan_addr = ((chan_addr >> 10) << 9) |
2630 (chan_addr & 0x1ff);
2634 } else if (num_dcts_intlv == 4) {
2635 if (intlv_addr == 0x4)
2636 chan_addr = ((chan_addr >> 10) << 8) |
2638 else if (intlv_addr == 0x5)
2639 chan_addr = ((chan_addr >> 11) << 9) |
2640 (chan_addr & 0x1ff);
2645 if (dct_offset_en) {
2646 amd64_read_pci_cfg(pvt->F1,
2647 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2649 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2652 f15h_select_dct(pvt, channel);
2654 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2658 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
* there is support for 4 DCTs, but only 2 are currently functional.
2660 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2661 * pvt->csels[1]. So we need to use '1' here to get correct info.
2662 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2664 alias_channel = (channel == 3) ? 1 : channel;
2666 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2669 *chan_sel = alias_channel;
2674 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2678 int cs_found = -EINVAL;
2681 for (range = 0; range < DRAM_RANGES; range++) {
2682 if (!dram_rw(pvt, range))
2685 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2686 cs_found = f15_m30h_match_to_this_node(pvt, range,
2690 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2691 (get_dram_limit(pvt, range) >= sys_addr)) {
2692 cs_found = f1x_match_to_this_node(pvt, range,
2693 sys_addr, chan_sel);
2702 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2703 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
* The @sys_addr is usually an error address received from the hardware.
2708 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2709 struct err_info *err)
2711 struct amd64_pvt *pvt = mci->pvt_info;
2713 error_address_to_page_and_offset(sys_addr, err);
2715 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2716 if (err->csrow < 0) {
2717 err->err_code = ERR_CSROW;
2722 * We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at this point.
2726 if (dct_ganging_enabled(pvt))
2727 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
* Debug routine to display the memory sizes of all logical DIMMs and their
* CSROWs.
2734 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2736 int dimm, size0, size1;
2737 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2738 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2740 if (pvt->fam == 0xf) {
2741 /* K8 families < revF not supported yet */
2742 if (pvt->ext_model < K8_REV_F)
2748 if (pvt->fam == 0x10) {
2749 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2751 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2752 pvt->csels[1].csbases :
2753 pvt->csels[0].csbases;
2756 dcsb = pvt->csels[1].csbases;
2758 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2761 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2763 /* Dump memory sizes for DIMM and its CSROWs */
2764 for (dimm = 0; dimm < 4; dimm++) {
2767 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
/*
* For F15h M60h, we need a multiplier for the LRDIMM cs_size
* calculation. We pass the dimm value to the dbam_to_cs mapper
* so we can find the multiplier from the corresponding DCSM.
*/
2774 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2775 DBAM_DIMM(dimm, dbam),
2779 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2780 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2781 DBAM_DIMM(dimm, dbam),
2784 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2786 dimm * 2 + 1, size1);
2790 static struct amd64_family_type family_types[] = {
2793 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2794 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2797 .early_channel_count = k8_early_channel_count,
2798 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2799 .dbam_to_cs = k8_dbam_to_chip_select,
2804 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2805 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2808 .early_channel_count = f1x_early_channel_count,
2809 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2810 .dbam_to_cs = f10_dbam_to_chip_select,
2815 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2816 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2819 .early_channel_count = f1x_early_channel_count,
2820 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2821 .dbam_to_cs = f15_dbam_to_chip_select,
2825 .ctl_name = "F15h_M30h",
2826 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2827 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2830 .early_channel_count = f1x_early_channel_count,
2831 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2832 .dbam_to_cs = f16_dbam_to_chip_select,
2836 .ctl_name = "F15h_M60h",
2837 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2838 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2841 .early_channel_count = f1x_early_channel_count,
2842 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2843 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2848 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2849 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2852 .early_channel_count = f1x_early_channel_count,
2853 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2854 .dbam_to_cs = f16_dbam_to_chip_select,
2858 .ctl_name = "F16h_M30h",
2859 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2860 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2863 .early_channel_count = f1x_early_channel_count,
2864 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2865 .dbam_to_cs = f16_dbam_to_chip_select,
2870 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2871 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2874 .early_channel_count = f17_early_channel_count,
2875 .dbam_to_cs = f17_addr_mask_to_cs_size,
2879 .ctl_name = "F17h_M10h",
2880 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2881 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2884 .early_channel_count = f17_early_channel_count,
2885 .dbam_to_cs = f17_addr_mask_to_cs_size,
2889 .ctl_name = "F17h_M30h",
2890 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2891 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2894 .early_channel_count = f17_early_channel_count,
2895 .dbam_to_cs = f17_addr_mask_to_cs_size,
2899 .ctl_name = "F17h_M60h",
2900 .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2901 .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2904 .early_channel_count = f17_early_channel_count,
2905 .dbam_to_cs = f17_addr_mask_to_cs_size,
2909 .ctl_name = "F17h_M70h",
2910 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2911 .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2914 .early_channel_count = f17_early_channel_count,
2915 .dbam_to_cs = f17_addr_mask_to_cs_size,
2920 .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2921 .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2924 .early_channel_count = f17_early_channel_count,
2925 .dbam_to_cs = f17_addr_mask_to_cs_size,
2929 .ctl_name = "F19h_M10h",
2930 .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
2931 .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
2934 .early_channel_count = f17_early_channel_count,
2935 .dbam_to_cs = f17_addr_mask_to_cs_size,
2939 .ctl_name = "F19h_M50h",
2940 .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
2941 .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
2944 .early_channel_count = f17_early_channel_count,
2945 .dbam_to_cs = f17_addr_mask_to_cs_size,
2951 * These are tables of eigenvectors (one per line) which can be used for the
2952 * construction of the syndrome tables. The modified syndrome search algorithm
2953 * uses those to find the symbol in error and thus the DIMM.
2955 * Algorithm courtesy of Ross LaFetra from AMD.
2957 static const u16 x4_vectors[] = {
2958 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2959 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2960 0x0001, 0x0002, 0x0004, 0x0008,
2961 0x1013, 0x3032, 0x4044, 0x8088,
2962 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2963 0x4857, 0xc4fe, 0x13cc, 0x3288,
2964 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2965 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2966 0x15c1, 0x2a42, 0x89ac, 0x4758,
2967 0x2b03, 0x1602, 0x4f0c, 0xca08,
2968 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2969 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2970 0x2b87, 0x164e, 0x642c, 0xdc18,
2971 0x40b9, 0x80de, 0x1094, 0x20e8,
2972 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2973 0x11c1, 0x2242, 0x84ac, 0x4c58,
2974 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2975 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2976 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2977 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2978 0x16b3, 0x3d62, 0x4f34, 0x8518,
2979 0x1e2f, 0x391a, 0x5cac, 0xf858,
2980 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2981 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2982 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2983 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2984 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2985 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2986 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2987 0x185d, 0x2ca6, 0x7914, 0x9e28,
2988 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2989 0x4199, 0x82ee, 0x19f4, 0x2e58,
2990 0x4807, 0xc40e, 0x130c, 0x3208,
2991 0x1905, 0x2e0a, 0x5804, 0xac08,
2992 0x213f, 0x132a, 0xadfc, 0x5ba8,
2993 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2996 static const u16 x8_vectors[] = {
2997 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2998 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2999 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
3000 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
3001 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
3002 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
3003 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
3004 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
3005 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
3006 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
3007 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
3008 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
3009 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
3010 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
3011 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
3012 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
3013 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
3014 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
3015 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
3018 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
3021 unsigned int i, err_sym;
3023 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
3025 unsigned v_idx = err_sym * v_dim;
3026 unsigned v_end = (err_sym + 1) * v_dim;
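/*
* Sketch of the idea: XOR matching eigenvector components (from the
* tables above) into the syndrome until it cancels to zero; the
* symbol whose vectors achieve that is the symbol in error.
*/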
3028 /* walk over all 16 bits of the syndrome */
3029 for (i = 1; i < (1U << 16); i <<= 1) {
3031 /* if bit is set in that eigenvector... */
3032 if (v_idx < v_end && vectors[v_idx] & i) {
3033 u16 ev_comp = vectors[v_idx++];
3035 /* ... and bit set in the modified syndrome, */
3045 /* can't get to zero, move to next symbol */
3050 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
3054 static int map_err_sym_to_channel(int err_sym, int sym_size)
3065 return err_sym >> 4;
3070 /* imaginary bits not in a DIMM */
WARN(1, "Invalid error symbol: 0x%x\n",
3080 return err_sym >> 3;
3085 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
3087 struct amd64_pvt *pvt = mci->pvt_info;
3090 if (pvt->ecc_sym_sz == 8)
3091 err_sym = decode_syndrome(syndrome, x8_vectors,
3092 ARRAY_SIZE(x8_vectors),
3094 else if (pvt->ecc_sym_sz == 4)
3095 err_sym = decode_syndrome(syndrome, x4_vectors,
3096 ARRAY_SIZE(x4_vectors),
3099 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
3103 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
3106 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
3109 enum hw_event_mc_err_type err_type;
3113 err_type = HW_EVENT_ERR_CORRECTED;
3114 else if (ecc_type == 1)
3115 err_type = HW_EVENT_ERR_UNCORRECTED;
3116 else if (ecc_type == 3)
3117 err_type = HW_EVENT_ERR_DEFERRED;
3119 WARN(1, "Something is rotten in the state of Denmark.\n");
3123 switch (err->err_code) {
3128 string = "Failed to map error addr to a node";
3131 string = "Failed to map error addr to a csrow";
3134 string = "Unknown syndrome - possible error reporting race";
3137 string = "MCA_SYND not valid - unknown syndrome and csrow";
3140 string = "Cannot decode normalized address";
3143 string = "WTF error";
3147 edac_mc_handle_error(err_type, mci, 1,
3148 err->page, err->offset, err->syndrome,
3149 err->csrow, err->channel, -1,
3153 static inline void decode_bus_error(int node_id, struct mce *m)
3155 struct mem_ctl_info *mci;
3156 struct amd64_pvt *pvt;
3157 u8 ecc_type = (m->status >> 45) & 0x3;
3158 u8 xec = XEC(m->status, 0x1f);
3159 u16 ec = EC(m->status);
3161 struct err_info err;
3163 mci = edac_mc_find(node_id);
3167 pvt = mci->pvt_info;
3169 /* Bail out early if this was an 'observed' error */
3170 if (PP(ec) == NBSL_PP_OBS)
3173 /* Do only ECC errors */
3174 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
3177 memset(&err, 0, sizeof(err));
3179 sys_addr = get_error_address(pvt, m);
3182 err.syndrome = extract_syndrome(m->status);
3184 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
3186 __log_ecc_error(mci, &err, ecc_type);
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* MCA_IPID register.
*
* Currently, we can derive the channel number by looking at the 6th nibble in
* the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
* number.
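*
* E.g. (illustrative value): ipid bits [31:0] = 0x00250000 give
* (0x250000 >> 20) == 2, i.e. UMC channel 2.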
3198 static int find_umc_channel(struct mce *m)
3200 return (m->ipid & GENMASK(31, 0)) >> 20;
3203 static void decode_umc_error(int node_id, struct mce *m)
3205 u8 ecc_type = (m->status >> 45) & 0x3;
3206 struct mem_ctl_info *mci;
3207 struct amd64_pvt *pvt;
3208 struct err_info err;
3211 mci = edac_mc_find(node_id);
3215 pvt = mci->pvt_info;
3217 memset(&err, 0, sizeof(err));
3219 if (m->status & MCI_STATUS_DEFERRED)
3222 err.channel = find_umc_channel(m);
3224 if (!(m->status & MCI_STATUS_SYNDV)) {
3225 err.err_code = ERR_SYND;
3229 if (ecc_type == 2) {
3230 u8 length = (m->synd >> 18) & 0x3f;
3233 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
3235 err.err_code = ERR_CHANNEL;
3238 err.csrow = m->synd & 0x7;
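/*
* The chip select comes from the low bits of MCA_SYND here; e.g.
* (illustrative) a value of 0x5 in those bits selects csrow 5.
*/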
3240 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3241 err.err_code = ERR_NORM_ADDR;
3245 error_address_to_page_and_offset(sys_addr, &err);
3248 __log_ecc_error(mci, &err, ecc_type);
3252 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return a negative value on error.
3254 * Reserve F0 and F6 on systems with a UMC.
3257 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3260 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3262 edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
3266 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3268 pci_dev_put(pvt->F0);
3271 edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
3276 pci_ctl_dev = &pvt->F0->dev;
3278 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
3279 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3280 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
3285 /* Reserve the ADDRESS MAP Device */
3286 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3288 edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
3292 /* Reserve the DCT Device */
3293 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3295 pci_dev_put(pvt->F1);
3298 edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
3303 pci_ctl_dev = &pvt->F2->dev;
3305 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3306 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3307 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3312 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
3315 pci_dev_put(pvt->F0);
3316 pci_dev_put(pvt->F6);
3318 pci_dev_put(pvt->F1);
3319 pci_dev_put(pvt->F2);
3323 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
3325 pvt->ecc_sym_sz = 4;
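/*
* Default to x4 symbols; the checks below bump this to x16 or x8
* when the UMC ECC_CTRL bits (BIT(9)/BIT(7)) or, on older families,
* EXT_NB_MCA_CFG BIT(25) indicate wider symbols.
*/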
3331 /* Check enabled channels only: */
3332 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3333 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
3334 pvt->ecc_sym_sz = 16;
3336 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
3337 pvt->ecc_sym_sz = 8;
3342 } else if (pvt->fam >= 0x10) {
3345 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
3346 /* F16h has only DCT0, so no need to read dbam1. */
3347 if (pvt->fam != 0x16)
3348 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
3350 /* F10h, revD and later can do x8 ECC too. */
3351 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
3352 pvt->ecc_sym_sz = 8;
3357 * Retrieve the hardware registers of the memory controller.
3359 static void __read_mc_regs_df(struct amd64_pvt *pvt)
3361 u8 nid = pvt->mc_node_id;
3362 struct amd64_umc *umc;
3365 /* Read registers from each UMC */
3368 umc_base = get_umc_base(i);
3371 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
3372 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3373 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3374 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3375 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
3380 * Retrieve the hardware registers of the memory controller (this includes the
3381 * 'Address Map' and 'Misc' device regs)
3383 static void read_mc_regs(struct amd64_pvt *pvt)
3389 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
3390 * those are Read-As-Zero.
3392 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
3393 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
3395 /* Check first whether TOP_MEM2 is enabled: */
3396 rdmsrl(MSR_AMD64_SYSCFG, msr_val);
3397 if (msr_val & BIT(21)) {
3398 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
3399 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
3401 edac_dbg(0, " TOP_MEM2 disabled\n");
3405 __read_mc_regs_df(pvt);
3406 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
3411 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
3413 read_dram_ctl_register(pvt);
3415 for (range = 0; range < DRAM_RANGES; range++) {
3418 /* read settings for this DRAM range */
3419 read_dram_base_limit_regs(pvt, range);
3421 rw = dram_rw(pvt, range);
3425 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
3427 get_dram_base(pvt, range),
3428 get_dram_limit(pvt, range));
3430 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
3431 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
3432 (rw & 0x1) ? "R" : "-",
3433 (rw & 0x2) ? "W" : "-",
3434 dram_intlv_sel(pvt, range),
3435 dram_dst_node(pvt, range));
3438 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
3439 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
3441 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
3443 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
3444 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
3446 if (!dct_ganging_enabled(pvt)) {
3447 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
3448 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
3452 read_dct_base_mask(pvt);
3454 determine_memory_type(pvt);
3455 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
3457 determine_ecc_sym_sz(pvt);
3461 * NOTE: CPU Revision Dependent code
3464 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3465 * k8 private pointer to -->
3466 * DRAM Bank Address mapping register
* DCL register where dual_channel_active is set
*
* The DBAM register consists of four 4-bit fields with these definitions:
*
* Bits:  CSROWs
* 0-3    CSROWs 0 and 1
* 4-7    CSROWs 2 and 3
* 8-11   CSROWs 4 and 5
* 12-15  CSROWs 6 and 7
*
* Values range from 0 to 15.
* The meaning of the values depends on CPU revision and dual-channel state;
* see the relevant BKDG for more info.
*
* The memory controller provides for a total of only 8 CSROWs in its current
* architecture. Each "pair" of CSROWs normally represents just one DIMM in
* single channel or two (2) DIMMs in dual channel mode.
*
* The following code logic collapses the various tables for CSROW based on
* CPU revision.
*
* Returns:
* The number of PAGE_SIZE pages on the specified CSROW.
3494 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3496 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3497 int csrow_nr = csrow_nr_orig;
3498 u32 cs_mode, nr_pages;
3502 cs_mode = DBAM_DIMM(csrow_nr, dbam);
3504 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
3507 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3508 nr_pages <<= 20 - PAGE_SHIFT;
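/*
* Illustrative example (values assumed): a dbam_to_cs() result of
* 2048 MB with 4K pages (PAGE_SHIFT == 12) yields
* 2048 << 8 == 524288 pages for this csrow/channel.
*/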
3510 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3511 csrow_nr_orig, dct, cs_mode);
3512 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3517 static int init_csrows_df(struct mem_ctl_info *mci)
3519 struct amd64_pvt *pvt = mci->pvt_info;
3520 enum edac_type edac_mode = EDAC_NONE;
3521 enum dev_type dev_type = DEV_UNKNOWN;
3522 struct dimm_info *dimm;
3526 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
3527 edac_mode = EDAC_S16ECD16ED;
3529 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
3530 edac_mode = EDAC_S8ECD8ED;
3532 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
3533 edac_mode = EDAC_S4ECD4ED;
3535 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
3536 edac_mode = EDAC_SECDED;
3540 for_each_chip_select(cs, umc, pvt) {
3541 if (!csrow_enabled(cs, umc, pvt))
3545 dimm = mci->csrows[cs]->channels[umc]->dimm;
3547 edac_dbg(1, "MC node: %d, csrow: %d\n",
3548 pvt->mc_node_id, cs);
3550 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
3551 dimm->mtype = pvt->dram_type;
3552 dimm->edac_mode = edac_mode;
3553 dimm->dtype = dev_type;
3562 * Initialize the array of csrow attribute instances, based on the values
* from the PCI config hardware registers.
3565 static int init_csrows(struct mem_ctl_info *mci)
3567 struct amd64_pvt *pvt = mci->pvt_info;
3568 enum edac_type edac_mode = EDAC_NONE;
3569 struct csrow_info *csrow;
3570 struct dimm_info *dimm;
3571 int i, j, empty = 1;
3576 return init_csrows_df(mci);
3578 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3582 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3583 pvt->mc_node_id, val,
3584 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3587 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3589 for_each_chip_select(i, 0, pvt) {
3590 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3591 bool row_dct1 = false;
3593 if (pvt->fam != 0xf)
3594 row_dct1 = !!csrow_enabled(i, 1, pvt);
3596 if (!row_dct0 && !row_dct1)
3599 csrow = mci->csrows[i];
3602 edac_dbg(1, "MC node: %d, csrow: %d\n",
3603 pvt->mc_node_id, i);
3606 nr_pages = get_csrow_nr_pages(pvt, 0, i);
3607 csrow->channels[0]->dimm->nr_pages = nr_pages;
3610 /* K8 has only one DCT */
3611 if (pvt->fam != 0xf && row_dct1) {
3612 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3614 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3615 nr_pages += row_dct1_pages;
3618 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3620 /* Determine DIMM ECC mode: */
3621 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3622 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3627 for (j = 0; j < pvt->channel_count; j++) {
3628 dimm = csrow->channels[j]->dimm;
3629 dimm->mtype = pvt->dram_type;
3630 dimm->edac_mode = edac_mode;
3638 /* get all cores on this DCT */
3639 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3643 for_each_online_cpu(cpu)
3644 if (topology_die_id(cpu) == nid)
3645 cpumask_set_cpu(cpu, mask);
3648 /* check MCG_CTL on all the cpus on this node */
3649 static bool nb_mce_bank_enabled_on_node(u16 nid)
3655 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3656 amd64_warn("%s: Error allocating mask\n", __func__);
3660 get_cpus_on_this_dct_cpumask(mask, nid);
3662 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3664 for_each_cpu(cpu, mask) {
3665 struct msr *reg = per_cpu_ptr(msrs, cpu);
3666 nbe = reg->l & MSR_MCGCTL_NBE;
3668 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3670 (nbe ? "enabled" : "disabled"));
3678 free_cpumask_var(mask);
3682 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3684 cpumask_var_t cmask;
3687 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3688 amd64_warn("%s: error allocating mask\n", __func__);
3692 get_cpus_on_this_dct_cpumask(cmask, nid);
3694 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3696 for_each_cpu(cpu, cmask) {
3698 struct msr *reg = per_cpu_ptr(msrs, cpu);
3701 if (reg->l & MSR_MCGCTL_NBE)
3702 s->flags.nb_mce_enable = 1;
3704 reg->l |= MSR_MCGCTL_NBE;
3707 * Turn off NB MCE reporting only when it was off before
3709 if (!s->flags.nb_mce_enable)
3710 reg->l &= ~MSR_MCGCTL_NBE;
3713 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3715 free_cpumask_var(cmask);
3720 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3724 u32 value, mask = 0x3; /* UECC/CECC enable */
3726 if (toggle_ecc_err_reporting(s, nid, ON)) {
3727 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3731 amd64_read_pci_cfg(F3, NBCTL, &value);
3733 s->old_nbctl = value & mask;
3734 s->nbctl_valid = true;
3737 amd64_write_pci_cfg(F3, NBCTL, value);
3739 amd64_read_pci_cfg(F3, NBCFG, &value);
3741 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3742 nid, value, !!(value & NBCFG_ECC_ENABLE));
3744 if (!(value & NBCFG_ECC_ENABLE)) {
3745 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3747 s->flags.nb_ecc_prev = 0;
3749 /* Attempt to turn on DRAM ECC Enable */
3750 value |= NBCFG_ECC_ENABLE;
3751 amd64_write_pci_cfg(F3, NBCFG, value);
3753 amd64_read_pci_cfg(F3, NBCFG, &value);
3755 if (!(value & NBCFG_ECC_ENABLE)) {
3756 amd64_warn("Hardware rejected DRAM ECC enable,"
3757 "check memory DIMM configuration.\n");
3760 amd64_info("Hardware accepted DRAM ECC Enable\n");
3763 s->flags.nb_ecc_prev = 1;
3766 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3767 nid, value, !!(value & NBCFG_ECC_ENABLE));
3772 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3775 u32 value, mask = 0x3; /* UECC/CECC enable */
3777 if (!s->nbctl_valid)
3780 amd64_read_pci_cfg(F3, NBCTL, &value);
3782 value |= s->old_nbctl;
3784 amd64_write_pci_cfg(F3, NBCTL, value);
3786 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3787 if (!s->flags.nb_ecc_prev) {
3788 amd64_read_pci_cfg(F3, NBCFG, &value);
3789 value &= ~NBCFG_ECC_ENABLE;
3790 amd64_write_pci_cfg(F3, NBCFG, value);
3793 /* restore the NB Enable MCGCTL bit */
3794 if (toggle_ecc_err_reporting(s, nid, OFF))
3795 amd64_warn("Error restoring NB MCGCTL settings!\n");
3798 static bool ecc_enabled(struct amd64_pvt *pvt)
3800 u16 nid = pvt->mc_node_id;
3801 bool nb_mce_en = false;
3805 if (boot_cpu_data.x86 >= 0x17) {
3806 u8 umc_en_mask = 0, ecc_en_mask = 0;
3807 struct amd64_umc *umc;
3812 /* Only check enabled UMCs. */
3813 if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3816 umc_en_mask |= BIT(i);
3818 if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3819 ecc_en_mask |= BIT(i);
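/*
* E.g. (illustrative): two enabled UMCs that both report ECC
* give umc_en_mask == ecc_en_mask == 0x3, so DRAM ECC is
* considered enabled below.
*/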
3822 /* Check whether at least one UMC is enabled: */
3824 ecc_en = umc_en_mask == ecc_en_mask;
3826 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3828 /* Assume UMC MCA banks are enabled. */
3831 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3833 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3835 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3837 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3838 MSR_IA32_MCG_CTL, nid);
3841 edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3843 if (!ecc_en || !nb_mce_en)
3850 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3852 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3855 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3856 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3857 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3859 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3860 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3864 /* Set chipkill only if ECC is enabled: */
3866 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3872 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3874 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3876 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3880 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3882 struct amd64_pvt *pvt = mci->pvt_info;
3884 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3885 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3888 f17h_determine_edac_ctl_cap(mci, pvt);
3890 if (pvt->nbcap & NBCAP_SECDED)
3891 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3893 if (pvt->nbcap & NBCAP_CHIPKILL)
3894 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3897 mci->edac_cap = determine_edac_cap(pvt);
3898 mci->mod_name = EDAC_MOD_STR;
3899 mci->ctl_name = fam_type->ctl_name;
3900 mci->dev_name = pci_name(pvt->F3);
3901 mci->ctl_page_to_phys = NULL;
3903 /* memory scrubber interface */
3904 mci->set_sdram_scrub_rate = set_scrub_rate;
3905 mci->get_sdram_scrub_rate = get_scrub_rate;
* Returns a pointer to the family descriptor on success, NULL otherwise.
3911 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3913 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3914 pvt->stepping = boot_cpu_data.x86_stepping;
3915 pvt->model = boot_cpu_data.x86_model;
3916 pvt->fam = boot_cpu_data.x86;
3920 fam_type = &family_types[K8_CPUS];
3921 pvt->ops = &family_types[K8_CPUS].ops;
3925 fam_type = &family_types[F10_CPUS];
3926 pvt->ops = &family_types[F10_CPUS].ops;
3930 if (pvt->model == 0x30) {
3931 fam_type = &family_types[F15_M30H_CPUS];
3932 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3934 } else if (pvt->model == 0x60) {
3935 fam_type = &family_types[F15_M60H_CPUS];
3936 pvt->ops = &family_types[F15_M60H_CPUS].ops;
/* Richland is a client-only part. */
3939 } else if (pvt->model == 0x13) {
3942 fam_type = &family_types[F15_CPUS];
3943 pvt->ops = &family_types[F15_CPUS].ops;
3948 if (pvt->model == 0x30) {
3949 fam_type = &family_types[F16_M30H_CPUS];
3950 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3953 fam_type = &family_types[F16_CPUS];
3954 pvt->ops = &family_types[F16_CPUS].ops;
3958 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3959 fam_type = &family_types[F17_M10H_CPUS];
3960 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3962 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3963 fam_type = &family_types[F17_M30H_CPUS];
3964 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3966 } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3967 fam_type = &family_types[F17_M60H_CPUS];
3968 pvt->ops = &family_types[F17_M60H_CPUS].ops;
3970 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3971 fam_type = &family_types[F17_M70H_CPUS];
3972 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3977 fam_type = &family_types[F17_CPUS];
3978 pvt->ops = &family_types[F17_CPUS].ops;
3980 if (pvt->fam == 0x18)
3981 family_types[F17_CPUS].ctl_name = "F18h";
3985 if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
3986 fam_type = &family_types[F19_M10H_CPUS];
3987 pvt->ops = &family_types[F19_M10H_CPUS].ops;
3989 } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
3990 fam_type = &family_types[F17_M70H_CPUS];
3991 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3992 fam_type->ctl_name = "F19h_M20h";
3994 } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
3995 fam_type = &family_types[F19_M50H_CPUS];
3996 pvt->ops = &family_types[F19_M50H_CPUS].ops;
3997 fam_type->ctl_name = "F19h_M50h";
3999 } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
4000 fam_type = &family_types[F19_M10H_CPUS];
4001 pvt->ops = &family_types[F19_M10H_CPUS].ops;
4002 fam_type->ctl_name = "F19h_MA0h";
4005 fam_type = &family_types[F19_CPUS];
4006 pvt->ops = &family_types[F19_CPUS].ops;
4007 family_types[F19_CPUS].ctl_name = "F19h";
4011 amd64_err("Unsupported family!\n");
4018 static const struct attribute_group *amd64_edac_attr_groups[] = {
4019 #ifdef CONFIG_EDAC_DEBUG
4026 static int hw_info_get(struct amd64_pvt *pvt)
4028 u16 pci_id1, pci_id2;
4031 if (pvt->fam >= 0x17) {
4032 pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
4036 pci_id1 = fam_type->f0_id;
4037 pci_id2 = fam_type->f6_id;
4039 pci_id1 = fam_type->f1_id;
4040 pci_id2 = fam_type->f2_id;
4043 ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
4052 static void hw_info_put(struct amd64_pvt *pvt)
4054 if (pvt->F0 || pvt->F1)
4055 free_mc_sibling_devs(pvt);
4060 static int init_one_instance(struct amd64_pvt *pvt)
4062 struct mem_ctl_info *mci = NULL;
4063 struct edac_mc_layer layers[2];
4067 * We need to determine how many memory channels there are. Then use
4068 * that information for calculating the size of the dynamic instance
4069 * tables in the 'mci' structure.
4071 pvt->channel_count = pvt->ops->early_channel_count(pvt);
4072 if (pvt->channel_count < 0)
4076 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
4077 layers[0].size = pvt->csels[0].b_cnt;
4078 layers[0].is_virt_csrow = true;
4079 layers[1].type = EDAC_MC_LAYER_CHANNEL;
* Always allocate the maximum number of channels (fam_type->max_mcs)
* since we can have setups with DIMMs on only one channel. This also
* simplifies handling later, for the price of a couple of KBs tops.
4086 layers[1].size = fam_type->max_mcs;
4087 layers[1].is_virt_csrow = false;
4089 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
4093 mci->pvt_info = pvt;
4094 mci->pdev = &pvt->F3->dev;
4096 setup_mci_misc_attrs(mci);
4098 if (init_csrows(mci))
4099 mci->edac_cap = EDAC_FLAG_NONE;
4102 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
4103 edac_dbg(1, "failed edac_mc_add_mc()\n");
4111 static bool instance_has_memory(struct amd64_pvt *pvt)
4113 bool cs_enabled = false;
4114 int cs = 0, dct = 0;
4116 for (dct = 0; dct < fam_type->max_mcs; dct++) {
4117 for_each_chip_select(cs, dct, pvt)
4118 cs_enabled |= csrow_enabled(cs, dct, pvt);
4124 static int probe_one_instance(unsigned int nid)
4126 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4127 struct amd64_pvt *pvt = NULL;
4128 struct ecc_settings *s;
4132 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
4138 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
4142 pvt->mc_node_id = nid;
4146 fam_type = per_family_init(pvt);
4150 ret = hw_info_get(pvt);
4155 if (!instance_has_memory(pvt)) {
4156 amd64_info("Node %d: No DIMMs detected.\n", nid);
4160 if (!ecc_enabled(pvt)) {
4163 if (!ecc_enable_override)
4166 if (boot_cpu_data.x86 >= 0x17) {
4167 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
4170 amd64_warn("Forcing ECC on!\n");
4172 if (!enable_ecc_error_reporting(s, nid, F3))
4176 ret = init_one_instance(pvt);
4178 amd64_err("Error probing instance: %d\n", nid);
4180 if (boot_cpu_data.x86 < 0x17)
4181 restore_ecc_error_reporting(s, nid, F3);
4186 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
4188 (pvt->ext_model >= K8_REV_F ? "revF or later "
4189 : "revE or earlier ")
4190 : ""), pvt->mc_node_id);
4192 dump_misc_regs(pvt);
4202 ecc_stngs[nid] = NULL;
4208 static void remove_one_instance(unsigned int nid)
4210 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4211 struct ecc_settings *s = ecc_stngs[nid];
4212 struct mem_ctl_info *mci;
4213 struct amd64_pvt *pvt;
4215 /* Remove from EDAC CORE tracking list */
4216 mci = edac_mc_del_mc(&F3->dev);
4220 pvt = mci->pvt_info;
4222 restore_ecc_error_reporting(s, nid, F3);
4224 kfree(ecc_stngs[nid]);
4225 ecc_stngs[nid] = NULL;
4227 /* Free the EDAC CORE resources */
4228 mci->pvt_info = NULL;
4235 static void setup_pci_device(void)
4240 pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
4242 pr_warn("%s(): Unable to create PCI control\n", __func__);
4243 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
4247 static const struct x86_cpu_id amd64_cpuids[] = {
4248 X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
4249 X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
4250 X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
4251 X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
4252 X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
4253 X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
4254 X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
4257 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
4259 static int __init amd64_edac_init(void)
4265 owner = edac_get_owner();
4266 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
4269 if (!x86_match_cpu(amd64_cpuids))
4272 if (amd_cache_northbridges() < 0)
4278 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
4282 msrs = msrs_alloc();
4286 for (i = 0; i < amd_nb_num(); i++) {
4287 err = probe_one_instance(i);
4289 /* unwind properly */
4291 remove_one_instance(i);
4297 if (!edac_has_mcs()) {
4302 /* register stuff with EDAC MCE */
4303 if (boot_cpu_data.x86 >= 0x17)
4304 amd_register_ecc_decoder(decode_umc_error);
4306 amd_register_ecc_decoder(decode_bus_error);
4310 #ifdef CONFIG_X86_32
4311 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
4314 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
4331 static void __exit amd64_edac_exit(void)
4336 edac_pci_release_generic_ctl(pci_ctl);
4338 /* unregister from EDAC MCE */
4339 if (boot_cpu_data.x86 >= 0x17)
4340 amd_unregister_ecc_decoder(decode_umc_error);
4342 amd_unregister_ecc_decoder(decode_bus_error);
4344 for (i = 0; i < amd_nb_num(); i++)
4345 remove_one_instance(i);
4356 module_init(amd64_edac_init);
4357 module_exit(amd64_edac_exit);
4359 MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh");
4362 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
4363 EDAC_AMD64_VERSION);
4365 module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 1=NMI");