// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "mce: " fmt
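
/* pr_fmt() above makes every pr_*() call in this file print as "mce: ...". */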

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
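
/*
 * Per-CPU bookkeeping (overview): mce_event[] is a small stack of up to
 * MAX_MC_EVT in-flight events, so nested machine checks taken while one is
 * being handled do not overwrite each other. mce_event_queue[] and
 * mce_ue_event_queue[] hold copies of events whose processing (printing,
 * memory_failure()) must be deferred out of the real-mode handler.
 */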

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
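
/*
 * Deferral pipeline: the machine check handler saves an event, then queues
 * irq_work (which is NMI-safe) to get out of the exception context; the
 * irq_work handler either prints the event or, for UEs, bounces through the
 * workqueue above so that memory_failure() runs in a context that may sleep.
 */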

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
	return;
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
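
/*
 * Illustrative usage sketch only (the handler name below is hypothetical):
 * a platform machine check handler would typically peek at the current
 * event and release the slot once it is done with it:
 *
 *	static void example_platform_mce_handler(void)
 *	{
 *		struct machine_check_event evt;
 *
 *		if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *			// ... inspect evt.error_type, evt.disposition ...
 *			release_mce_event();
 *		}
 *	}
 */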

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}
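
/*
 * Note: machine_check_ue_event() runs from the machine check handler itself
 * (possibly in real mode), so the only deferral primitive used there is
 * irq_work_queue(), which is NMI-safe; anything that can sleep is pushed
 * further out through mce_ue_event_work.
 */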

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

/*
 * Process pending MCE UE events from the per-CPU UE event queue. This work
 * function is scheduled (via irq_work) from the machine check handler and
 * runs in process context, so memory_failure() below is allowed to sleep.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event, it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				__this_cpu_dec(mce_ue_count);
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

/*
 * Process pending MCE events from the per-CPU MCE event queue. This function
 * runs in irq_work context, shortly after the machine check handler has
 * queued the event and normal interrupt processing resumes.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			__this_cpu_dec(mce_queue_count);
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
	       level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
	       err_type, subtype, dar_str,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
		       level, evt->cpu, current->pid, current->comm,
		       in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
		       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
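
/*
 * Illustrative console output from the above (all values made up), for a
 * recovered SLB multihit on CPU 0:
 *
 *	MCE: CPU0: machine check (Warning) Host SLB Multihit DAR: c000000001234560 [Recovered]
 *	MCE: CPU0: NIP: [c000000000123456] some_function+0x26/0x100
 *	MCE: CPU0: Initiator CPU
 *	MCE: CPU0: Probable Software error (some chance of hardware cause)
 */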

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1 respectively.
 */
long notrace machine_check_early(struct pt_regs *regs)
{
	long handled = 0;
	bool nested = in_nmi();
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);

	if (!nested)
		nmi_enter();

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	if (!nested)
		nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);
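
/*
 * Worked example of the PVR decode above, using an illustrative PVR value of
 * 0x004e1202: PVR_VER() yields 0x004e (PVR_POWER9); (pvr & 0xe000) == 0
 * selects the Nimbus (scale-out) variant; and (pvr & 0xfff) == 0x202 means
 * DD2.2, so the trigger is assigned to DTRIG_SUSPEND_ESCAPE.
 */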

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable we only do the
		 * emulation if we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}