/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>

#include "edac_module.h"
#include <ras/ras_event.h>
#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
/* Fall back to a no-op when the architecture has no atomic scrub helper. */
#define edac_atomic_scrub(va, size) do { } while (0)
#endif
int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

/* lock for the memory controllers' control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
 */
static const char *edac_mc_owner;
static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
	return container_of(e, struct mem_ctl_info, error_desc);
}
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				     unsigned int len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = scnprintf(p, len, "%s %d ",
			      edac_layer_name[mci->layers[i].type],
			      dimm->location[i]);
		p += n;
		len -= n;
		count += n;
	}

	return count;
}
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "  channel = %p\n", chan);
	edac_dbg(4, "  channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "  channel->dimm = %p\n", chan->dimm);
}
static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	char location[80];

	if (!dimm->nr_pages)
		return;

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 dimm->idx, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */
const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty",
	[MEM_RESERVED]	= "Reserved",
	[MEM_UNKNOWN]	= "Unknown",
	[MEM_SDR]	= "Unbuffered-SDR",
	[MEM_RDR]	= "Registered-SDR",
	[MEM_DDR]	= "Unbuffered-DDR",
	[MEM_RDDR]	= "Registered-DDR",
	[MEM_DDR2]	= "Unbuffered-DDR2",
	[MEM_FB_DDR2]	= "FullyBuffered-DDR2",
	[MEM_RDDR2]	= "Registered-DDR2",
	[MEM_DDR3]	= "Unbuffered-DDR3",
	[MEM_RDDR3]	= "Registered-DDR3",
	[MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
	[MEM_LPDDR3]	= "Low-Power-DDR3-RAM",
	[MEM_DDR4]	= "Unbuffered-DDR4",
	[MEM_RDDR4]	= "Registered-DDR4",
	[MEM_LPDDR4]	= "Low-Power-DDR4-RAM",
	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
	[MEM_DDR5]	= "Unbuffered-DDR5",
	[MEM_RDDR5]	= "Registered-DDR5",
	[MEM_LRDDR5]	= "Load-Reduced-DDR5-RAM",
	[MEM_NVDIMM]	= "Non-volatile-RAM",
	[MEM_WIO2]	= "Wide-IO-2",
	[MEM_HBM2]	= "High-bandwidth-memory-Gen2",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
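/*
 * Usage sketch (illustrative, not from this file): callers index this
 * table with a DIMM's memory type when formatting output, e.g.:
 *
 *	edac_dbg(1, "DIMM type: %s\n", edac_mem_types[dimm->mtype]);
 */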
/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. On
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed to keep advancing to the proper offsets in
 * memory when allocating a struct along with its embedded structs, as
 * edac_device_alloc_ctl_info() does, for example.
 *
 * On return, the pointer 'p' will have been incremented, ready to be used
 * on the next call to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
	unsigned int align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'. Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by
	 * default. As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
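/*
 * Layout sketch (illustrative, using hypothetical struct foo/struct bar):
 * run the offsets against a NULL base, allocate once, then rebase each
 * pointer into the real allocation, as edac_mc_alloc() does below:
 *
 *	void *ptr = NULL;
 *	struct foo *f = edac_align_ptr(&ptr, sizeof(*f), 1);
 *	struct bar *b = edac_align_ptr(&ptr, sizeof(*b), n);
 *	size_t size = (unsigned long)b + n * sizeof(*b);
 *	struct foo *base = kzalloc(size, GFP_KERNEL);
 *
 *	f = (struct foo *)((char *)base + (unsigned long)f);
 *	b = (struct bar *)((char *)base + (unsigned long)b);
 */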
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	put_device(&mci->dev);
}
static void mci_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct csrow_info *csr;
	int i, chn, row;

	if (mci->dimms) {
		for (i = 0; i < mci->tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < mci->nr_csrows; row++) {
			csr = mci->csrows[row];
			if (!csr)
				continue;

			if (csr->channels) {
				for (chn = 0; chn < mci->num_cschannel; chn++)
					kfree(csr->channels[chn]);
				kfree(csr->channels);
			}
			kfree(csr);
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}
static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
	unsigned int tot_channels = mci->num_cschannel;
	unsigned int tot_csrows = mci->nr_csrows;
	unsigned int row, chn;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr;

		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;

		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			return -ENOMEM;

		for (chn = 0; chn < tot_channels; chn++) {
			struct rank_info *chan;

			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				return -ENOMEM;

			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	return 0;
}
static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
	unsigned int pos[EDAC_MAX_LAYERS];
	unsigned int row, chn, idx;
	int layer;
	char *p;

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		return -ENOMEM;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (idx = 0; idx < mci->tot_dimms; idx++) {
		struct dimm_info *dimm;
		struct rank_info *chan;
		int n, len;

		chan = mci->csrows[row]->channels[chn];

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			return -ENOMEM;
		mci->dimms[idx] = dimm;
		dimm->mci = mci;
		dimm->idx = idx;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = scnprintf(p, len, "mc#%u", mci->mc_idx);
		p += n;
		len -= n;
		for (layer = 0; layer < mci->n_layers; layer++) {
			n = scnprintf(p, len, "%s#%u",
				      edac_layer_name[mci->layers[layer].type],
				      pos[layer]);
			p += n;
			len -= n;
			dimm->location[layer] = pos[layer];
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (mci->layers[0].is_virt_csrow) {
			chn++;
			if (chn == mci->num_cschannel) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == mci->nr_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (layer = mci->n_layers - 1; layer >= 0; layer--) {
			pos[layer]++;
			if (pos[layer] < mci->layers[layer].size)
				break;
			pos[layer] = 0;
		}
	}

	return 0;
}
struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
				   unsigned int n_layers,
				   struct edac_mc_layer *layers,
				   unsigned int sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	unsigned int idx, size, tot_dimms = 1;
	unsigned int tot_csrows = 1, tot_channels = 1;
	void *pvt, *ptr = NULL;
	bool per_rank = false;

	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
		return NULL;

	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (idx = 0; idx < n_layers; idx++) {
		tot_dimms *= layers[idx].size;

		if (layers[idx].is_virt_csrow)
			tot_csrows *= layers[idx].size;
		else
			tot_channels *= layers[idx].size;

		if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure. We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	mci->dev.release = mci_release;
	device_initialize(&mci->dev);

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	if (edac_mc_alloc_csrows(mci))
		goto error;

	if (edac_mc_alloc_dimms(mci))
		goto error;

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
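/*
 * Allocation sketch (illustrative, with a hypothetical struct my_priv):
 * a driver describes one chip-select layer and one channel layer, and
 * asks for a private struct to be appended to the allocation:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = nr_csrows;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = nr_channels;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_priv));
 *	if (!mci)
 *		return -ENOMEM;
 */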
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);
/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
/**
 * find_mci_by_dev - scan the list of controllers looking for the one that
 *	manages the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
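/*
 * Usage sketch (illustrative): a driver that registered with
 * mci->pdev = &pdev->dev can recover its mci from that same device:
 *
 *	struct mem_ctl_info *mci = find_mci_by_dev(&pdev->dev);
 */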
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value; we need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
		if (mci->mc_idx == idx)
			goto unlock;
	}

	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);
const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		struct dimm_info *dimm;
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}

		mci_for_each_dimm(mci, dimm)
			edac_mc_dump_dimm(dimm);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		mci->mod_name, mci->ctl_name, mci->dev_name,
		edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
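/*
 * Registration sketch (illustrative, with hypothetical my_edac names):
 * after edac_mc_alloc(), a driver fills in the identification fields and
 * registers; setting ->edac_check selects polling mode:
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mod_name = "my_edac";
 *	mci->ctl_name = "my_ctl";
 *	mci->edac_check = my_check;	(enables the polling workqueue)
 *
 *	if (edac_mc_add_mc_with_groups(mci, NULL)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 */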
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
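/*
 * Removal sketch (illustrative, with a hypothetical my_remove): a driver's
 * remove path unregisters first, then frees the mci:
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
 *
 *		if (mci)
 *			edac_mc_free(mci);
 *	}
 */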
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
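/*
 * Usage sketch (illustrative): a driver that has decoded an error to a
 * physical address maps it back to a chip-select row like this:
 *
 *	row = edac_mc_find_csrow_by_page(mci, error_addr >> PAGE_SHIFT);
 */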
const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
static void edac_inc_ce_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ce_mc += e->error_count;

	if (dimm)
		dimm->ce_count += e->error_count;
	else
		mci->ce_noinfo_count += e->error_count;
}
static void edac_inc_ue_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ue_mc += e->error_count;

	if (dimm)
		dimm->ue_count += e->error_count;
	else
		mci->ue_noinfo_count += e->error_count;
}
static void edac_ce_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		edac_mc_printk(mci, KERN_WARNING,
			"%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n",
			e->error_count, e->msg,
			*e->msg ? " " : "",
			e->label, e->location, e->page_frame_number, e->offset_in_page,
			e->grain, e->syndrome,
			*e->other_detail ? " - " : "",
			e->other_detail);
	}

	edac_inc_ce_error(e);

	if (mci->scrub_mode == SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, e->page_frame_number) :
			e->page_frame_number;

		edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain);
	}
}
static void edac_ue_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);

	if (edac_mc_get_log_ue()) {
		edac_mc_printk(mci, KERN_WARNING,
			"%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
			e->error_count, e->msg,
			*e->msg ? " " : "",
			e->label, e->location, e->page_frame_number, e->offset_in_page,
			e->grain,
			*e->other_detail ? " - " : "",
			e->other_detail);
	}

	edac_inc_ue_error(e);

	if (edac_mc_get_panic_on_ue()) {
		panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
		      e->msg,
		      *e->msg ? " " : "",
		      e->label, e->location, e->page_frame_number, e->offset_in_page,
		      e->grain,
		      *e->other_detail ? " - " : "",
		      e->other_detail);
	}
}
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	enum hw_event_mc_err_type type = e->type;
	u16 count = e->error_count;

	if (row < 0)
		return;

	edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);

	if (type == HW_EVENT_ERR_CORRECTED) {
		mci->csrows[row]->ce_count += count;
		if (chan >= 0)
			mci->csrows[row]->channels[chan]->ce_count += count;
	} else {
		mci->csrows[row]->ue_count += count;
	}
}
void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	u8 grain_bits;

	/* Sanity-check driver-supplied grain value. */
	if (WARN_ON_ONCE(!e->grain))
		e->grain = 1;

	grain_bits = fls_long(e->grain - 1);

	/* Report the error via the trace interface */
	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(e->type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	if (e->type == HW_EVENT_ERR_CORRECTED)
		edac_ce_error(e);
	else
		edac_ue_error(e);
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
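/*
 * Worked example (illustrative): the grain is the controller's minimum
 * error-locating granularity in bytes, and fls_long(grain - 1) rounds it
 * up to a power-of-two exponent for the trace event. A grain of 8 bytes
 * gives grain_bits = 3; a non-power-of-two grain of 12 is reported as 4
 * (i.e. 16 bytes).
 */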
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	struct dimm_info *dimm;
	char *p, *end;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	struct edac_raw_error_desc *e = &mci->error_desc;
	bool any_memory = true;
	const char *prefix;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof (*e));
	e->error_count = error_count;
	e->type = type;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	/* need valid strings here for both: */
	e->msg = msg ?: "";
	e->other_detail = other_detail ?: "";
	/*
	 * Check if the event report is consistent and if the memory location
	 * is known. If it is, the DIMM(s) label info will be filled and the
	 * DIMM's error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			any_memory = false;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';
	end = p + sizeof(e->label);
	prefix = "";

	mci_for_each_dimm(mci, dimm) {
		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole channel/memory
		 * controller/... may be affected. Also, don't show errors for
		 * empty DIMM slots.
		 */
		if (!dimm->nr_pages)
			continue;

		n_labels++;
		if (n_labels > EDAC_MAX_LABELS) {
			p = e->label;
			*p = '\0';
		} else {
			p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
			prefix = OTHER_LABEL;
		}

		/*
		 * get csrow/channel of the DIMM, in order to allow
		 * incrementing the compat API counters
		 */
		edac_dbg(4, "%s csrows map: (%d,%d)\n",
			 mci->csbased ? "rank" : "dimm",
			 dimm->csrow, dimm->cschannel);
		if (row == -1)
			row = dimm->csrow;
		else if (row >= 0 && row != dimm->csrow)
			row = -2;

		if (chan == -1)
			chan = dimm->cschannel;
		else if (chan >= 0 && chan != dimm->cschannel)
			chan = -2;
	}

	if (any_memory)
		strscpy(e->label, "any memory", sizeof(e->label));
	else if (!*e->label)
		strscpy(e->label, "unknown memory", sizeof(e->label));

	edac_inc_csrow(e, row, chan);
	/* Fill the RAM location data */
	p = e->location;
	end = p + sizeof(e->location);
	prefix = "";

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += scnprintf(p, end - p, "%s%s:%d", prefix,
			       edac_layer_name[mci->layers[i].type], pos[i]);
		prefix = " ";
	}

	edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
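/*
 * Reporting sketch (illustrative): a driver's check routine that decoded
 * one corrected error on csrow 2, channel 1 could report it as:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     page, offset, syndrome,
 *			     2, 1, -1,
 *			     "read error", "");
 *
 * A layer position the hardware cannot resolve is passed as -1, which
 * makes the label-matching loop above accept every DIMM in that layer
 * (the "any memory"/multi-label cases).
 */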