/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *      http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>

#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif

int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

static int edac_report = EDAC_REPORTING_ENABLED;

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

/*
 * Used to lock EDAC MC to just one module, preventing two drivers, e.g.
 * apei/ghes and i7core_edac, from being used at the same time.
 */
static const char *edac_mc_owner;

int edac_get_report_status(void)
{
        return edac_report;
}
EXPORT_SYMBOL_GPL(edac_get_report_status);

void edac_set_report_status(int new)
{
        if (new == EDAC_REPORTING_ENABLED ||
            new == EDAC_REPORTING_DISABLED ||
            new == EDAC_REPORTING_FORCE)
                edac_report = new;
}
EXPORT_SYMBOL_GPL(edac_set_report_status);

static int edac_report_set(const char *str, const struct kernel_param *kp)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                edac_report = EDAC_REPORTING_ENABLED;
        else if (!strncmp(str, "off", 3))
                edac_report = EDAC_REPORTING_DISABLED;
        else if (!strncmp(str, "force", 5))
                edac_report = EDAC_REPORTING_FORCE;

        return 0;
}

static int edac_report_get(char *buffer, const struct kernel_param *kp)
{
        int ret = 0;

        switch (edac_report) {
        case EDAC_REPORTING_ENABLED:
                ret = sprintf(buffer, "on");
                break;
        case EDAC_REPORTING_DISABLED:
                ret = sprintf(buffer, "off");
                break;
        case EDAC_REPORTING_FORCE:
                ret = sprintf(buffer, "force");
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static const struct kernel_param_ops edac_report_ops = {
        .set = edac_report_set,
        .get = edac_report_get,
};

module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);

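/*
 * Example (illustrative, not part of the original file): the parameter
 * above is exposed under the core module's parameter directory, so
 * reporting can be toggled at boot or at run time. Paths assume the
 * core is built as edac_core:
 *
 *   # on the kernel command line
 *   edac_core.edac_report=off
 *
 *   # at run time, via sysfs
 *   echo force > /sys/module/edac_core/parameters/edac_report
 *   cat /sys/module/edac_core/parameters/edac_report
 */
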
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                     unsigned int len)
{
        struct mem_ctl_info *mci = dimm->mci;
        int i, n, count = 0;
        char *p = buf;

        for (i = 0; i < mci->n_layers; i++) {
                /*
                 * Use scnprintf(): unlike snprintf(), it returns the number
                 * of characters actually written, so 'p' and 'len' cannot
                 * be advanced past the end of 'buf' on truncation.
                 */
                n = scnprintf(p, len, "%s %d ",
                              edac_layer_name[mci->layers[i].type],
                              dimm->location[i]);
                p += n;
                len -= n;
                count += n;
                if (len <= 1)
                        break;
        }

        return count;
}

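#if 0   /* Usage sketch (illustrative only, not built) */
static void example_print_dimm_location(struct dimm_info *dimm)
{
        char location[80];

        /* Fills 'location' with e.g. "channel 0 slot 1 " for a 2-layer MC. */
        edac_dimm_info_location(dimm, location, sizeof(location));
        pr_info("DIMM sits at: %s\n", location);
}
#endif
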
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
        edac_dbg(4, "  channel->chan_idx = %d\n", chan->chan_idx);
        edac_dbg(4, "    channel = %p\n", chan);
        edac_dbg(4, "    channel->csrow = %p\n", chan->csrow);
        edac_dbg(4, "    channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
        char location[80];

        if (!dimm->nr_pages)
                return;

        edac_dimm_info_location(dimm, location, sizeof(location));

        edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
                 dimm->mci->csbased ? "rank" : "dimm",
                 dimm->idx, location, dimm->csrow, dimm->cschannel);
        edac_dbg(4, "  dimm = %p\n", dimm);
        edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
        edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
        edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
        edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
        edac_dbg(4, "  csrow = %p\n", csrow);
        edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
        edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
        edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
        edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
        edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
        edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
        edac_dbg(3, "\tmci = %p\n", mci);
        edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
        edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
        edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
        edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
        edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
                 mci->nr_csrows, mci->csrows);
        edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
                 mci->tot_dimms, mci->dimms);
        edac_dbg(3, "\tdev = %p\n", mci->pdev);
        edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
                 mci->mod_name, mci->ctl_name);
        edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif                          /* CONFIG_EDAC_DEBUG */

const char * const edac_mem_types[] = {
        [MEM_EMPTY]     = "Empty",
        [MEM_RESERVED]  = "Reserved",
        [MEM_UNKNOWN]   = "Unknown",
        [MEM_FPM]       = "FPM",
        [MEM_EDO]       = "EDO",
        [MEM_BEDO]      = "BEDO",
        [MEM_SDR]       = "Unbuffered-SDR",
        [MEM_RDR]       = "Registered-SDR",
        [MEM_DDR]       = "Unbuffered-DDR",
        [MEM_RDDR]      = "Registered-DDR",
        [MEM_RMBS]      = "RMBS",
        [MEM_DDR2]      = "Unbuffered-DDR2",
        [MEM_FB_DDR2]   = "FullyBuffered-DDR2",
        [MEM_RDDR2]     = "Registered-DDR2",
        [MEM_XDR]       = "XDR",
        [MEM_DDR3]      = "Unbuffered-DDR3",
        [MEM_RDDR3]     = "Registered-DDR3",
        [MEM_LRDDR3]    = "Load-Reduced-DDR3-RAM",
        [MEM_DDR4]      = "Unbuffered-DDR4",
        [MEM_RDDR4]     = "Registered-DDR4",
        [MEM_LRDDR4]    = "Load-Reduced-DDR4-RAM",
        [MEM_NVDIMM]    = "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:          pointer to a pointer with the memory offset to be used. At
 *              return, this will be incremented to point to the next offset
 * @size:       Size of the data structure to be reserved
 * @n_elems:    Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep advancing to the proper
 * offsets in memory when allocating a struct along with its embedded
 * structs, as edac_mc_alloc() below does, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
        unsigned int align, r;
        void *ptr = *p;

        *p += size * n_elems;

        /*
         * 'ptr' can possibly be an unaligned item X such that sizeof(X) is
         * 'size'.  Adjust 'ptr' so that its alignment is at least as
         * stringent as what the compiler would provide for X and return
         * the aligned result.
         * Here we assume that the alignment of a "long long" is the most
         * stringent alignment that the compiler will ever provide by default.
         * As far as I know, this is a reasonable assumption.
         */
        if (size > sizeof(long))
                align = sizeof(long long);
        else if (size > sizeof(int))
                align = sizeof(long);
        else if (size > sizeof(short))
                align = sizeof(int);
        else if (size > sizeof(char))
                align = sizeof(short);
        else
                return (char *)ptr;

        /* Compute the misalignment of the offset value itself, not of &p. */
        r = (unsigned long)ptr % align;

        if (r == 0)
                return (char *)ptr;

        *p += align - r;

        return (void *)(((unsigned long)ptr) + align - r);
}

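/*
 * Usage sketch (illustrative, not part of the original file): callers run
 * edac_align_ptr() twice over the same sequence of members -- first with a
 * NULL base to size a single allocation, then again to relocate each offset
 * into the freshly allocated block. A minimal two-member example:
 */
#if 0   /* not built */
struct foo { int x; };
struct bar { long long y; };

static void *example_single_shot_alloc(struct foo **f, struct bar **b)
{
        void *ptr = NULL, *base;
        void *foo_off, *bar_off;

        /* Pass 1: accumulate aligned offsets starting from 0. */
        foo_off = edac_align_ptr(&ptr, sizeof(struct foo), 1);
        bar_off = edac_align_ptr(&ptr, sizeof(struct bar), 2);

        /* After the calls, 'ptr' holds the total size; allocate once. */
        base = kzalloc((unsigned long)ptr, GFP_KERNEL);
        if (!base)
                return NULL;

        /* Pass 2: turn the offsets into real pointers inside 'base'. */
        *f = (struct foo *)((char *)base + (unsigned long)foo_off);
        *b = (struct bar *)((char *)base + (unsigned long)bar_off);

        return base;
}
#endif
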
static void _edac_mc_free(struct mem_ctl_info *mci)
{
        struct csrow_info *csr;
        int i, chn, row;

        if (mci->dimms) {
                for (i = 0; i < mci->tot_dimms; i++)
                        kfree(mci->dimms[i]);
                kfree(mci->dimms);
        }

        if (mci->csrows) {
                for (row = 0; row < mci->nr_csrows; row++) {
                        csr = mci->csrows[row];
                        if (!csr)
                                continue;

                        if (csr->channels) {
                                for (chn = 0; chn < mci->num_cschannel; chn++)
                                        kfree(csr->channels[chn]);
                                kfree(csr->channels);
                        }
                        kfree(csr);
                }
                kfree(mci->csrows);
        }
        kfree(mci);
}

struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
                                   unsigned int n_layers,
                                   struct edac_mc_layer *layers,
                                   unsigned int sz_pvt)
{
        struct mem_ctl_info *mci;
        struct edac_mc_layer *layer;
        struct csrow_info *csr;
        struct rank_info *chan;
        struct dimm_info *dimm;
        u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
        unsigned int pos[EDAC_MAX_LAYERS];
        unsigned int idx, size, tot_dimms = 1, count = 1;
        unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
        void *pvt, *p, *ptr = NULL;
        int i, j, row, chn, n, len;
        bool per_rank = false;

        if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
                return NULL;

        /*
         * Calculate the total amount of dimms and csrows/cschannels while
         * in the old API emulation mode
         */
        for (idx = 0; idx < n_layers; idx++) {
                tot_dimms *= layers[idx].size;

                if (layers[idx].is_virt_csrow)
                        tot_csrows *= layers[idx].size;
                else
                        tot_channels *= layers[idx].size;

                if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
                        per_rank = true;
        }

        /* Figure out the offsets of the various items from the start of an mc
         * structure.  We want the alignment of each item to be at least as
         * stringent as what the compiler would provide if we could simply
         * hardcode everything into a single struct.
         */
        mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
        layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
        for (i = 0; i < n_layers; i++) {
                count *= layers[i].size;
                edac_dbg(4, "errcount layer %d size %d\n", i, count);
                ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
                tot_errcount += 2 * count;
        }

        edac_dbg(4, "allocating %d error counters\n", tot_errcount);
        pvt = edac_align_ptr(&ptr, sz_pvt, 1);
        size = ((unsigned long)pvt) + sz_pvt;

        edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
                 size,
                 tot_dimms,
                 per_rank ? "ranks" : "dimms",
                 tot_csrows * tot_channels);

        mci = kzalloc(size, GFP_KERNEL);
        if (mci == NULL)
                return NULL;

        /* Adjust pointers so they point within the memory we just allocated
         * rather than an imaginary chunk of memory located at address 0.
         */
        layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
        for (i = 0; i < n_layers; i++) {
                mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
                mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
        }
        pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

        /* setup index and various internal pointers */
        mci->mc_idx = mc_num;
        mci->tot_dimms = tot_dimms;
        mci->pvt_info = pvt;
        mci->n_layers = n_layers;
        mci->layers = layer;
        memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
        mci->nr_csrows = tot_csrows;
        mci->num_cschannel = tot_channels;
        mci->csbased = per_rank;

        /*
         * Allocate and fill the csrow/channels structs
         */
        mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
        if (!mci->csrows)
                goto error;
        for (row = 0; row < tot_csrows; row++) {
                csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
                if (!csr)
                        goto error;
                mci->csrows[row] = csr;
                csr->csrow_idx = row;
                csr->mci = mci;
                csr->nr_channels = tot_channels;
                csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
                                        GFP_KERNEL);
                if (!csr->channels)
                        goto error;

                for (chn = 0; chn < tot_channels; chn++) {
                        chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
                        if (!chan)
                                goto error;
                        csr->channels[chn] = chan;
                        chan->chan_idx = chn;
                        chan->csrow = csr;
                }
        }

        /*
         * Allocate and fill the dimm structs
         */
        mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
        if (!mci->dimms)
                goto error;

        memset(&pos, 0, sizeof(pos));
        row = 0;
        chn = 0;
        for (idx = 0; idx < tot_dimms; idx++) {
                chan = mci->csrows[row]->channels[chn];

                dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
                if (!dimm)
                        goto error;
                mci->dimms[idx] = dimm;
                dimm->mci = mci;
                dimm->idx = idx;

                /*
                 * Copy DIMM location and initialize it.
                 */
                len = sizeof(dimm->label);
                p = dimm->label;
                n = snprintf(p, len, "mc#%u", mc_num);
                p += n;
                len -= n;
                for (j = 0; j < n_layers; j++) {
                        n = snprintf(p, len, "%s#%u",
                                     edac_layer_name[layers[j].type],
                                     pos[j]);
                        p += n;
                        len -= n;
                        dimm->location[j] = pos[j];

                        if (len <= 0)
                                break;
                }

                /* Link it to the csrows old API data */
                chan->dimm = dimm;
                dimm->csrow = row;
                dimm->cschannel = chn;

                /* Increment csrow location */
                if (layers[0].is_virt_csrow) {
                        chn++;
                        if (chn == tot_channels) {
                                chn = 0;
                                row++;
                        }
                } else {
                        row++;
                        if (row == tot_csrows) {
                                row = 0;
                                chn++;
                        }
                }

                /* Increment dimm location */
                for (j = n_layers - 1; j >= 0; j--) {
                        pos[j]++;
                        if (pos[j] < layers[j].size)
                                break;
                        pos[j] = 0;
                }
        }

        mci->op_state = OP_ALLOC;

        return mci;

error:
        _edac_mc_free(mci);

        return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

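/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * describing one controller with 4 chip-select rows and 2 channels might
 * allocate its mem_ctl_info like this ('struct my_mc_pvt' is hypothetical):
 */
#if 0   /* not built */
static struct mem_ctl_info *example_alloc_mci(void)
{
        struct edac_mc_layer layers[2];

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 4;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 2;
        layers[1].is_virt_csrow = false;

        /* 4 x 2 = 8 dimm/rank slots are allocated behind the scenes. */
        return edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
                             sizeof(struct my_mc_pvt));
}
#endif
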
void edac_mc_free(struct mem_ctl_info *mci)
{
        edac_dbg(1, "\n");

        /* If we're not yet registered with sysfs free only what was allocated
         * in edac_mc_alloc().
         */
        if (!device_is_registered(&mci->dev)) {
                _edac_mc_free(mci);
                return;
        }

        /* the mci instance is freed here, when the sysfs object is dropped */
        edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

bool edac_has_mcs(void)
{
        bool ret;

        mutex_lock(&mem_ctls_mutex);

        ret = list_empty(&mc_devices);

        mutex_unlock(&mem_ctls_mutex);

        return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);

/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        edac_dbg(3, "\n");

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->pdev == dev)
                        return mci;
        }

        return NULL;
}

/**
 * find_mci_by_dev - scan the list of controllers looking for the one that
 *                   manages the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
        struct mem_ctl_info *ret;

        mutex_lock(&mem_ctls_mutex);
        ret = __find_mci_by_dev(dev);
        mutex_unlock(&mem_ctls_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * edac_mc_workq_function
 *      performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
        struct delayed_work *d_work = to_delayed_work(work_req);
        struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

        mutex_lock(&mem_ctls_mutex);

        if (mci->op_state != OP_RUNNING_POLL) {
                mutex_unlock(&mem_ctls_mutex);
                return;
        }

        if (edac_op_state == EDAC_OPSTATE_POLL)
                mci->edac_check(mci);

        mutex_unlock(&mem_ctls_mutex);

        /* Queue ourselves again. */
        edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *      user space has updated our poll period value, need to
 *      reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        mutex_lock(&mem_ctls_mutex);

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);

                if (mci->op_state == OP_RUNNING_POLL)
                        edac_mod_work(&mci->work, value);
        }
        mutex_unlock(&mem_ctls_mutex);
}

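/*
 * Example (illustrative, not part of the original file): the poll period
 * consumed by edac_mc_get_poll_msec() above is a module parameter; assuming
 * the core is built as edac_core, it can be inspected and updated from user
 * space, which ends up calling edac_mc_reset_delay_period():
 *
 *   cat /sys/module/edac_core/parameters/edac_mc_poll_msec
 *   echo 5000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
 */
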
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *      locking model:
 *
 *              called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
        struct list_head *item, *insert_before;
        struct mem_ctl_info *p;

        insert_before = &mc_devices;

        p = __find_mci_by_dev(mci->pdev);
        if (unlikely(p != NULL))
                goto fail0;

        list_for_each(item, &mc_devices) {
                p = list_entry(item, struct mem_ctl_info, link);

                if (p->mc_idx >= mci->mc_idx) {
                        if (unlikely(p->mc_idx == mci->mc_idx))
                                goto fail1;

                        insert_before = item;
                        break;
                }
        }

        list_add_tail_rcu(&mci->link, insert_before);
        return 0;

fail0:
        edac_printk(KERN_WARNING, EDAC_MC,
                "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
                edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
        return 1;

fail1:
        edac_printk(KERN_WARNING, EDAC_MC,
                "bug in low-level driver: attempt to assign\n"
                "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
        return 1;
}

static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
        list_del_rcu(&mci->link);

        /* these are for safe removal of devices from global list while
         * NMI handlers may be traversing list
         */
        synchronize_rcu();
        INIT_LIST_HEAD(&mci->link);

        return list_empty(&mc_devices);
}

struct mem_ctl_info *edac_mc_find(int idx)
{
        struct mem_ctl_info *mci;
        struct list_head *item;

        mutex_lock(&mem_ctls_mutex);

        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
                if (mci->mc_idx == idx)
                        goto unlock;
        }

        mci = NULL;
unlock:
        mutex_unlock(&mem_ctls_mutex);
        return mci;
}
EXPORT_SYMBOL(edac_mc_find);

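/*
 * Usage sketch (illustrative, not part of the original file): another
 * driver can look up a registered controller by index, e.g. to check
 * whether instance 0 already exists before registering its own:
 */
#if 0   /* not built */
static bool example_mc0_present(void)
{
        return edac_mc_find(0) != NULL;
}
#endif
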
const char *edac_get_owner(void)
{
        return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
                               const struct attribute_group **groups)
{
        int ret = -EINVAL;

        edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
        if (edac_debug_level >= 3)
                edac_mc_dump_mci(mci);

        if (edac_debug_level >= 4) {
                struct dimm_info *dimm;
                int i;

                for (i = 0; i < mci->nr_csrows; i++) {
                        struct csrow_info *csrow = mci->csrows[i];
                        u32 nr_pages = 0;
                        int j;

                        for (j = 0; j < csrow->nr_channels; j++)
                                nr_pages += csrow->channels[j]->dimm->nr_pages;
                        if (!nr_pages)
                                continue;
                        edac_mc_dump_csrow(csrow);
                        for (j = 0; j < csrow->nr_channels; j++)
                                if (csrow->channels[j]->dimm->nr_pages)
                                        edac_mc_dump_channel(csrow->channels[j]);
                }

                mci_for_each_dimm(mci, dimm)
                        edac_mc_dump_dimm(dimm);
        }
#endif
        mutex_lock(&mem_ctls_mutex);

        if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
                ret = -EPERM;
                goto fail0;
        }

        if (add_mc_to_global_list(mci))
                goto fail0;

        /* set load time so that error rate can be tracked */
        mci->start_time = jiffies;

        mci->bus = edac_get_sysfs_subsys();

        if (edac_create_sysfs_mci_device(mci, groups)) {
                edac_mc_printk(mci, KERN_WARNING,
                        "failed to create sysfs device\n");
                goto fail1;
        }

        if (mci->edac_check) {
                mci->op_state = OP_RUNNING_POLL;

                INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
                edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

        } else {
                mci->op_state = OP_RUNNING_INTERRUPT;
        }

        /* Report action taken */
        edac_mc_printk(mci, KERN_INFO,
                "Giving out device to module %s controller %s: DEV %s (%s)\n",
                mci->mod_name, mci->ctl_name, mci->dev_name,
                edac_op_state_to_string(mci->op_state));

        edac_mc_owner = mci->mod_name;

        mutex_unlock(&mem_ctls_mutex);
        return 0;

fail1:
        del_mc_from_global_list(mci);

fail0:
        mutex_unlock(&mem_ctls_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);

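/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * probe path allocates the mci, fills the driver fields and then hands
 * the device over. 'my_check' and the pdev wiring are hypothetical; the
 * edac_mc_add_mc() wrapper (add with NULL sysfs groups) is assumed from
 * the EDAC headers:
 */
#if 0   /* not built */
static int example_probe(struct platform_device *pdev,
                         struct mem_ctl_info *mci)
{
        mci->pdev = &pdev->dev;
        mci->mod_name = "example_edac";
        mci->ctl_name = "example_ctl";
        mci->dev_name = dev_name(&pdev->dev);
        mci->edac_check = my_check;     /* enables OP_RUNNING_POLL mode */

        if (edac_mc_add_mc(mci)) {
                edac_mc_free(mci);
                return -ENODEV;
        }
        return 0;
}
#endif
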
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
        struct mem_ctl_info *mci;

        edac_dbg(0, "\n");

        mutex_lock(&mem_ctls_mutex);

        /* find the requested mci struct in the global list */
        mci = __find_mci_by_dev(dev);
        if (mci == NULL) {
                mutex_unlock(&mem_ctls_mutex);
                return NULL;
        }

        /* mark MCI offline: */
        mci->op_state = OP_OFFLINE;

        if (del_mc_from_global_list(mci))
                edac_mc_owner = NULL;

        mutex_unlock(&mem_ctls_mutex);

        if (mci->edac_check)
                edac_stop_work(&mci->work);

        /* remove from sysfs */
        edac_remove_sysfs_mci_device(mci);

        edac_printk(KERN_INFO, EDAC_MC,
                "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
                mci->mod_name, mci->ctl_name, edac_dev_name(mci));

        return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

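/*
 * Usage sketch (illustrative, not part of the original file): the remove
 * path mirrors the probe path above -- unregister first, then free:
 */
#if 0   /* not built */
static int example_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

        if (mci)
                edac_mc_free(mci);
        return 0;
}
#endif
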
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
                                u32 size)
{
        struct page *pg;
        void *virt_addr;
        unsigned long flags = 0;

        edac_dbg(3, "\n");

        /* ECC error page was not in our memory. Ignore it. */
        if (!pfn_valid(page))
                return;

        /* Find the actual page structure then map it and fix */
        pg = pfn_to_page(page);

        if (PageHighMem(pg))
                local_irq_save(flags);

        virt_addr = kmap_atomic(pg);

        /* Perform architecture specific atomic scrub operation */
        edac_atomic_scrub(virt_addr + offset, size);

        /* Unmap and complete */
        kunmap_atomic(virt_addr);

        if (PageHighMem(pg))
                local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
        struct csrow_info **csrows = mci->csrows;
        int row, i, j, n;

        edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
        row = -1;

        for (i = 0; i < mci->nr_csrows; i++) {
                struct csrow_info *csrow = csrows[i];
                n = 0;
                for (j = 0; j < csrow->nr_channels; j++) {
                        struct dimm_info *dimm = csrow->channels[j]->dimm;
                        n += dimm->nr_pages;
                }
                if (n == 0)
                        continue;

                edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
                         mci->mc_idx,
                         csrow->first_page, page, csrow->last_page,
                         csrow->page_mask);

                if ((page >= csrow->first_page) &&
                    (page <= csrow->last_page) &&
                    ((page & csrow->page_mask) ==
                     (csrow->first_page & csrow->page_mask))) {
                        row = i;
                        break;
                }
        }

        if (row == -1)
                edac_mc_printk(mci, KERN_ERR,
                        "could not look up page error address %lx\n",
                        (unsigned long)page);

        return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

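/*
 * Usage sketch (illustrative, not part of the original file): drivers that
 * decode an error to a physical address can map it back to a csrow before
 * reporting, e.g.:
 */
#if 0   /* not built */
static int example_page_to_csrow(struct mem_ctl_info *mci, u64 err_addr)
{
        /* PAGE_SHIFT converts the byte address to a page frame number. */
        return edac_mc_find_csrow_by_page(mci, err_addr >> PAGE_SHIFT);
}
#endif
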
const char *edac_layer_name[] = {
        [EDAC_MC_LAYER_BRANCH] = "branch",
        [EDAC_MC_LAYER_CHANNEL] = "channel",
        [EDAC_MC_LAYER_SLOT] = "slot",
        [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
        [EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ce_mc += count;

        if (!enable_per_layer_report) {
                mci->ce_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ce_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
                              bool enable_per_layer_report,
                              const int pos[EDAC_MAX_LAYERS],
                              const u16 count)
{
        int i, index = 0;

        mci->ue_mc += count;

        if (!enable_per_layer_report) {
                mci->ue_noinfo_count += count;
                return;
        }

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        break;
                index += pos[i];
                mci->ue_per_layer[i][index] += count;

                if (i < mci->n_layers - 1)
                        index *= mci->layers[i + 1].size;
        }
}

static void edac_ce_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          long grain)
{
        unsigned long remapped_page;
        char *msg_aux = "";

        if (*msg)
                msg_aux = " ";

        if (edac_mc_get_log_ce()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s%son %s (%s %s - %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail, other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d CE %s%son %s (%s %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail);
        }
        edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

        if (mci->scrub_mode == SCRUB_SW_SRC) {
                /*
                 * Some memory controllers (called MCs below) can remap
                 * memory so that it is still available at a different
                 * address when PCI devices map into memory.
                 * MCs that can't do this lose the memory where PCI
                 * devices are mapped. This mapping is MC-dependent
                 * and so we call back into the MC driver for it to
                 * map the MC page to a physical (CPU) page which can
                 * then be mapped to a virtual page - which can then
                 * be scrubbed.
                 */
                remapped_page = mci->ctl_page_to_phys ?
                        mci->ctl_page_to_phys(mci, page_frame_number) :
                        page_frame_number;

                edac_mc_scrub_block(remapped_page,
                                        offset_in_page, grain);
        }
}

static void edac_ue_error(struct mem_ctl_info *mci,
                          const u16 error_count,
                          const int pos[EDAC_MAX_LAYERS],
                          const char *msg,
                          const char *location,
                          const char *label,
                          const char *detail,
                          const char *other_detail,
                          const bool enable_per_layer_report)
{
        char *msg_aux = "";

        if (*msg)
                msg_aux = " ";

        if (edac_mc_get_log_ue()) {
                if (other_detail && *other_detail)
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s%son %s (%s %s - %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail, other_detail);
                else
                        edac_mc_printk(mci, KERN_WARNING,
                                       "%d UE %s%son %s (%s %s)\n",
                                       error_count, msg, msg_aux, label,
                                       location, detail);
        }

        if (edac_mc_get_panic_on_ue()) {
                if (other_detail && *other_detail)
                        panic("UE %s%son %s (%s %s - %s)\n",
                              msg, msg_aux, label, location, detail, other_detail);
                else
                        panic("UE %s%son %s (%s %s)\n",
                              msg, msg_aux, label, location, detail);
        }

        edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}

void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
                              struct mem_ctl_info *mci,
                              struct edac_raw_error_desc *e)
{
        char detail[80];
        int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
        u8 grain_bits;

        /* Sanity-check driver-supplied grain value. */
        if (WARN_ON_ONCE(!e->grain))
                e->grain = 1;

        grain_bits = fls_long(e->grain - 1);

        /* Report the error via the trace interface */
        if (IS_ENABLED(CONFIG_RAS))
                trace_mc_event(type, e->msg, e->label, e->error_count,
                               mci->mc_idx, e->top_layer, e->mid_layer,
                               e->low_layer,
                               (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
                               grain_bits, e->syndrome, e->other_detail);

        /* Memory type dependent details about the error */
        if (type == HW_EVENT_ERR_CORRECTED) {
                snprintf(detail, sizeof(detail),
                        "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
                        e->page_frame_number, e->offset_in_page,
                        e->grain, e->syndrome);
                edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
                              detail, e->other_detail, e->enable_per_layer_report,
                              e->page_frame_number, e->offset_in_page, e->grain);
        } else {
                snprintf(detail, sizeof(detail),
                        "page:0x%lx offset:0x%lx grain:%ld",
                        e->page_frame_number, e->offset_in_page, e->grain);

                edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
                              detail, e->other_detail, e->enable_per_layer_report);
        }
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);

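/*
 * Worked example (added for clarity): grain_bits above is the base-2 log
 * of the grain, rounded up. For a reporting granularity of 32 bytes,
 * fls_long(32 - 1) = fls_long(31) = 5, i.e. 2^5 = 32; for a grain of 64,
 * fls_long(63) = 6. The WARN_ON_ONCE() guard keeps a driver-supplied
 * grain of 0 from wrapping to fls_long(ULONG_MAX).
 */
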
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                          struct mem_ctl_info *mci,
                          const u16 error_count,
                          const unsigned long page_frame_number,
                          const unsigned long offset_in_page,
                          const unsigned long syndrome,
                          const int top_layer,
                          const int mid_layer,
                          const int low_layer,
                          const char *msg,
                          const char *other_detail)
{
        struct dimm_info *dimm;
        char *p;
        int row = -1, chan = -1;
        int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
        int i, n_labels = 0;
        struct edac_raw_error_desc *e = &mci->error_desc;

        edac_dbg(3, "MC%d\n", mci->mc_idx);

        /* Fills the error report buffer */
        memset(e, 0, sizeof(*e));
        e->error_count = error_count;
        e->top_layer = top_layer;
        e->mid_layer = mid_layer;
        e->low_layer = low_layer;
        e->page_frame_number = page_frame_number;
        e->offset_in_page = offset_in_page;
        e->syndrome = syndrome;
        e->msg = msg;
        e->other_detail = other_detail;

        /*
         * Check if the event report is consistent and if the memory
         * location is known. If it is known, enable_per_layer_report will be
         * true, the DIMM(s) label info will be filled and the per-layer
         * error counters will be incremented.
         */
        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] >= (int)mci->layers[i].size) {

                        edac_mc_printk(mci, KERN_ERR,
                                       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
                                       edac_layer_name[mci->layers[i].type],
                                       pos[i], mci->layers[i].size);
                        /*
                         * Instead of just returning it, let's use what's
                         * known about the error. The increment routines and
                         * the DIMM filter logic will do the right thing by
                         * pointing at the likely damaged DIMMs.
                         */
                        pos[i] = -1;
                }
                if (pos[i] >= 0)
                        e->enable_per_layer_report = true;
        }

        /*
         * Get the dimm label/grain that applies to the match criteria.
         * As the error algorithm may not be able to point to just one memory
         * stick, the logic here will get all possible labels that could
         * potentially be affected by the error.
         * On FB-DIMM memory controllers, for uncorrected errors, it is common
         * to have only the MC channel and the MC dimm (also called "branch")
         * but the channel is not known, as the memory is arranged in pairs,
         * where each memory belongs to a separate channel within the same
         * branch.
         */
        p = e->label;
        *p = '\0';

        mci_for_each_dimm(mci, dimm) {
                if (top_layer >= 0 && top_layer != dimm->location[0])
                        continue;
                if (mid_layer >= 0 && mid_layer != dimm->location[1])
                        continue;
                if (low_layer >= 0 && low_layer != dimm->location[2])
                        continue;

                /* get the max grain, over the error match range */
                if (dimm->grain > e->grain)
                        e->grain = dimm->grain;

                /*
                 * If the error is memory-controller wide, there's no need to
                 * seek for the affected DIMMs because the whole
                 * channel/memory controller/...  may be affected.
                 * Also, don't show errors for empty DIMM slots.
                 */
                if (!e->enable_per_layer_report || !dimm->nr_pages)
                        continue;

                if (n_labels >= EDAC_MAX_LABELS) {
                        e->enable_per_layer_report = false;
                        break;
                }
                n_labels++;
                if (p != e->label) {
                        strcpy(p, OTHER_LABEL);
                        p += strlen(OTHER_LABEL);
                }
                strcpy(p, dimm->label);
                p += strlen(p);

                /*
                 * get csrow/channel of the DIMM, in order to allow
                 * incrementing the compat API counters
                 */
                edac_dbg(4, "%s csrows map: (%d,%d)\n",
                        mci->csbased ? "rank" : "dimm",
                        dimm->csrow, dimm->cschannel);
                if (row == -1)
                        row = dimm->csrow;
                else if (row >= 0 && row != dimm->csrow)
                        row = -2;

                if (chan == -1)
                        chan = dimm->cschannel;
                else if (chan >= 0 && chan != dimm->cschannel)
                        chan = -2;
        }

        if (!e->enable_per_layer_report) {
                strcpy(e->label, "any memory");
        } else {
                edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
                if (p == e->label)
                        strcpy(e->label, "unknown memory");
                if (type == HW_EVENT_ERR_CORRECTED) {
                        if (row >= 0) {
                                mci->csrows[row]->ce_count += error_count;
                                if (chan >= 0)
                                        mci->csrows[row]->channels[chan]->ce_count += error_count;
                        }
                } else
                        if (row >= 0)
                                mci->csrows[row]->ue_count += error_count;
        }

        /* Fill the RAM location data */
        p = e->location;

        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
                        continue;

                p += sprintf(p, "%s:%d ",
                             edac_layer_name[mci->layers[i].type],
                             pos[i]);
        }
        if (p > e->location)
                *(p - 1) = '\0';

        edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
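
/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * error handler typically decodes its hardware registers and forwards one
 * event here. Reporting a single corrected error on csrow 1, channel 0,
 * with a page/offset/syndrome it has already computed, might look like:
 */
#if 0   /* not built */
static void example_report_ce(struct mem_ctl_info *mci,
                              unsigned long pfn, unsigned long offset,
                              unsigned long syndrome)
{
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                             1,                 /* error count */
                             pfn, offset, syndrome,
                             1, 0, -1,          /* layers: csrow 1, chan 0 */
                             "single-bit ECC", "");
}
#endif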