// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/asm-prototypes.h>
static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
        .func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
        .func = machine_check_ue_irq_work,
};

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
int mce_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
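/*
 * Illustrative sketch only: a consumer of this chain could look like the
 * hypothetical callback below (my_mce_cb/my_mce_nb are invented names for
 * this example). The chain is invoked from machine_process_ue_event() with
 * the queued machine_check_event as the data pointer.
 *
 *	static int my_mce_cb(struct notifier_block *nb, unsigned long val,
 *			     void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		if (evt->error_type == MCE_ERROR_TYPE_UE)
 *			pr_debug("UE event on CPU %d\n", evt->cpu);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_mce_nb = { .notifier_call = my_mce_cb };
 *
 *	mce_register_notifier(&my_mce_nb);
 */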
static void mce_set_error_info(struct machine_check_event *mce,
                               struct mce_error_info *mce_err)
{
        mce->error_type = mce_err->error_type;
        switch (mce_err->error_type) {
        case MCE_ERROR_TYPE_UE:
                mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
                break;
        case MCE_ERROR_TYPE_SLB:
                mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
                break;
        case MCE_ERROR_TYPE_ERAT:
                mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
                break;
        case MCE_ERROR_TYPE_TLB:
                mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
                break;
        case MCE_ERROR_TYPE_USER:
                mce->u.user_error.user_error_type = mce_err->u.user_error_type;
                break;
        case MCE_ERROR_TYPE_RA:
                mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
                break;
        case MCE_ERROR_TYPE_LINK:
                mce->u.link_error.link_error_type = mce_err->u.link_error_type;
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                break;
        }
}
/*
 * Decode and save high level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
                    struct mce_error_info *mce_err,
                    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
        int index = local_paca->mce_info->mce_nest_count++;
        struct machine_check_event *mce;

        mce = &local_paca->mce_info->mce_event[index];

        /*
         * Return if we don't have enough space to log mce event.
         * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
         * the check below will stop buffer overrun.
         */
        if (index >= MAX_MC_EVT)
                return;

        /* Populate generic machine check info */
        mce->version = MCE_V1;
        mce->srr0 = nip;
        mce->srr1 = regs->msr;
        mce->gpr3 = regs->gpr[3];
        mce->in_use = 1;
        mce->cpu = get_paca()->paca_index;

        /* Mark it recovered if we have handled it and MSR(RI=1). */
        if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

        mce->initiator = mce_err->initiator;
        mce->severity = mce_err->severity;
        mce->sync_error = mce_err->sync_error;
        mce->error_class = mce_err->error_class;

        /*
         * Populate the mce error_type and type-specific error_type.
         */
        mce_set_error_info(mce, mce_err);

        if (!addr)
                return;

        if (mce->error_type == MCE_ERROR_TYPE_TLB) {
                mce->u.tlb_error.effective_address_provided = true;
                mce->u.tlb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
                mce->u.slb_error.effective_address_provided = true;
                mce->u.slb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
                mce->u.erat_error.effective_address_provided = true;
                mce->u.erat_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
                mce->u.user_error.effective_address_provided = true;
                mce->u.user_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
                mce->u.ra_error.effective_address_provided = true;
                mce->u.ra_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
                mce->u.link_error.effective_address_provided = true;
                mce->u.link_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
                mce->u.ue_error.effective_address_provided = true;
                mce->u.ue_error.effective_address = addr;
                if (phys_addr != ULONG_MAX) {
                        mce->u.ue_error.physical_address_provided = true;
                        mce->u.ue_error.physical_address = phys_addr;
                        mce->u.ue_error.ignore_event = mce_err->ignore_event;
                        machine_check_ue_event(mce);
                }
        }
}
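/*
 * Note: mce_nest_count makes mce_event[] behave as a small per-CPU stack,
 * so a machine check taken while an earlier one is still being handled
 * gets its own slot; get_mce_event() below pops the most recent entry.
 */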
/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once the event has been consumed.
 *		1 <= release the slot.
 *
 * return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
        int index = local_paca->mce_info->mce_nest_count - 1;
        struct machine_check_event *mc_evt;
        int ret = 0;

        /* Sanity check */
        if (index < 0)
                return ret;

        /* Check if we have MCE info to process. */
        if (index < MAX_MC_EVT) {
                mc_evt = &local_paca->mce_info->mce_event[index];
                /* Copy the event structure and release the original */
                if (mce)
                        *mce = *mc_evt;
                if (release)
                        mc_evt->in_use = 0;
                ret = 1;
        }
        /* Decrement the count to free the slot. */
        if (release)
                local_paca->mce_info->mce_nest_count--;

        return ret;
}
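/*
 * Usage sketch for a (hypothetical) platform handler that wants to inspect
 * the most recent event before freeing its slot:
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		... examine evt ...
 *		release_mce_event();
 *	}
 */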
void release_mce_event(void)
{
        get_mce_event(NULL, true);
}

static void machine_check_ue_irq_work(struct irq_work *work)
{
        schedule_work(&mce_ue_event_work);
}
/*
 * Queue up the MCE event which can then be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
        int index;

        index = local_paca->mce_info->mce_ue_count++;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                local_paca->mce_info->mce_ue_count--;
                return;
        }
        memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
               evt, sizeof(*evt));

        /* Queue work to process this event later. */
        irq_work_queue(&mce_ue_event_irq_work);
}
/*
 * Queue up the MCE event which can then be handled later.
 */
void machine_check_queue_event(void)
{
        int index;
        struct machine_check_event evt;

        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;

        index = local_paca->mce_info->mce_queue_count++;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                local_paca->mce_info->mce_queue_count--;
                return;
        }
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));

        /* Queue irq work to process this event later. */
        irq_work_queue(&mce_event_process_work);
}
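/*
 * Note the split above: machine_check_queue_event() runs in the early,
 * real-mode path and only copies the event out of the nest buffer and
 * queues irq_work; all printing and reporting is deferred to
 * machine_check_process_queued_event() once interrupts are enabled again.
 */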
void mce_common_process_ue(struct pt_regs *regs,
                           struct mce_error_info *mce_err)
{
        const struct exception_table_entry *entry;

        entry = search_kernel_exception_table(regs->nip);
        if (entry) {
                mce_err->ignore_event = true;
                regs->nip = extable_fixup(entry);
        }
}
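/*
 * Concretely: if a UE lands on a user access in, say, copy_from_user(),
 * that instruction has an exception table entry, so the fixup above
 * redirects the interrupted code to its error path and the event is
 * flagged ignore_event; the queue processing below then skips reporting
 * it, since the fixup path handles the error itself.
 */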
/*
 * Process pending UE events from the MCE UE event queue. This runs from
 * the workqueue (process context), so memory_failure() is allowed to sleep.
 */
static void machine_process_ue_event(struct work_struct *work)
{
        int index;
        struct machine_check_event *evt;

        while (local_paca->mce_info->mce_ue_count > 0) {
                index = local_paca->mce_info->mce_ue_count - 1;
                evt = &local_paca->mce_info->mce_ue_event_queue[index];
                blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
                /*
                 * This should probably be queued elsewhere, but oh well.
                 *
                 * Don't report this machine check because the caller has
                 * asked us to ignore the event, it has a fixup handler which
                 * will do the appropriate error handling and reporting.
                 */
                if (evt->error_type == MCE_ERROR_TYPE_UE) {
                        if (evt->u.ue_error.ignore_event) {
                                local_paca->mce_info->mce_ue_count--;
                                continue;
                        }

                        if (evt->u.ue_error.physical_address_provided) {
                                unsigned long pfn;

                                pfn = evt->u.ue_error.physical_address >>
                                        PAGE_SHIFT;
                                memory_failure(pfn, 0);
                        } else {
                                pr_warn("Failed to identify bad address from "
                                        "where the uncorrectable error (UE) "
                                        "was generated\n");
                        }
                }
#endif
                local_paca->mce_info->mce_ue_count--;
        }
}
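/*
 * Note on the pfn computation above: memory_failure() takes a page frame
 * number, i.e. the physical address shifted down by PAGE_SHIFT. With 64K
 * pages (PAGE_SHIFT = 16), a UE at physical address 0x12340000 poisons
 * pfn 0x1234.
 */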
/*
 * Process pending MCE events from the MCE event queue. This runs from
 * irq_work, i.e. shortly after the machine check once interrupts are
 * enabled again.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
        int index;
        struct machine_check_event *evt;

        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

        /*
         * For now just print it to console.
         * TODO: log this error event to FSP or nvram.
         */
        while (local_paca->mce_info->mce_queue_count > 0) {
                index = local_paca->mce_info->mce_queue_count - 1;
                evt = &local_paca->mce_info->mce_event_queue[index];

                if (evt->error_type == MCE_ERROR_TYPE_UE &&
                    evt->u.ue_error.ignore_event) {
                        local_paca->mce_info->mce_queue_count--;
                        continue;
                }
                machine_check_print_event_info(evt, false, false);
                local_paca->mce_info->mce_queue_count--;
        }
}
void machine_check_print_event_info(struct machine_check_event *evt,
                                    bool user_mode, bool in_guest)
{
        const char *level, *sevstr, *subtype, *err_type, *initiator;
        uint64_t ea = 0, pa = 0;
        int n = 0;
        char dar_str[50];
        char pa_str[50];
        static const char *mc_ue_types[] = {
                "Indeterminate",
                "Instruction fetch",
                "Page table walk ifetch",
                "Load/Store",
                "Page table walk Load/Store",
        };
        static const char *mc_slb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_erat_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_tlb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_user_types[] = {
                "Indeterminate",
                "tlbie(l) invalid",
                "scv invalid",
        };
        static const char *mc_ra_types[] = {
                "Indeterminate",
                "Instruction fetch (bad)",
                "Instruction fetch (foreign)",
                "Page table walk ifetch (bad)",
                "Page table walk ifetch (foreign)",
                "Load (bad)",
                "Store (bad)",
                "Page table walk Load/Store (bad)",
                "Page table walk Load/Store (foreign)",
                "Load/Store (foreign)",
        };
        static const char *mc_link_types[] = {
                "Indeterminate",
                "Instruction fetch (timeout)",
                "Page table walk ifetch (timeout)",
                "Load (timeout)",
                "Store (timeout)",
                "Page table walk Load/Store (timeout)",
        };
        static const char *mc_error_class[] = {
                "Unknown",
                "Hardware error",
                "Probable Hardware error (some chance of software cause)",
                "Software error",
                "Probable Software error (some chance of hardware cause)",
        };
        /* Print things out */
        if (evt->version != MCE_V1) {
                pr_err("Machine Check Exception, Unknown event version %d !\n",
                       evt->version);
                return;
        }
        switch (evt->severity) {
        case MCE_SEV_NO_ERROR:
                level = KERN_INFO;
                sevstr = "Harmless";
                break;
        case MCE_SEV_WARNING:
                level = KERN_WARNING;
                sevstr = "Warning";
                break;
        case MCE_SEV_SEVERE:
                level = KERN_ERR;
                sevstr = "Severe";
                break;
        case MCE_SEV_FATAL:
        default:
                level = KERN_ERR;
                sevstr = "Fatal";
                break;
        }

        switch (evt->initiator) {
        case MCE_INITIATOR_CPU:
                initiator = "CPU";
                break;
        case MCE_INITIATOR_PCI:
                initiator = "PCI";
                break;
        case MCE_INITIATOR_ISA:
                initiator = "ISA";
                break;
        case MCE_INITIATOR_MEMORY:
                initiator = "Memory";
                break;
        case MCE_INITIATOR_POWERMGM:
                initiator = "Power Management";
                break;
        case MCE_INITIATOR_UNKNOWN:
        default:
                initiator = "Unknown";
                break;
        }
        switch (evt->error_type) {
        case MCE_ERROR_TYPE_UE:
                err_type = "UE";
                subtype = evt->u.ue_error.ue_error_type <
                        ARRAY_SIZE(mc_ue_types) ?
                        mc_ue_types[evt->u.ue_error.ue_error_type]
                        : "Unknown";
                if (evt->u.ue_error.effective_address_provided)
                        ea = evt->u.ue_error.effective_address;
                if (evt->u.ue_error.physical_address_provided)
                        pa = evt->u.ue_error.physical_address;
                break;
        case MCE_ERROR_TYPE_SLB:
                err_type = "SLB";
                subtype = evt->u.slb_error.slb_error_type <
                        ARRAY_SIZE(mc_slb_types) ?
                        mc_slb_types[evt->u.slb_error.slb_error_type]
                        : "Unknown";
                if (evt->u.slb_error.effective_address_provided)
                        ea = evt->u.slb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_ERAT:
                err_type = "ERAT";
                subtype = evt->u.erat_error.erat_error_type <
                        ARRAY_SIZE(mc_erat_types) ?
                        mc_erat_types[evt->u.erat_error.erat_error_type]
                        : "Unknown";
                if (evt->u.erat_error.effective_address_provided)
                        ea = evt->u.erat_error.effective_address;
                break;
        case MCE_ERROR_TYPE_TLB:
                err_type = "TLB";
                subtype = evt->u.tlb_error.tlb_error_type <
                        ARRAY_SIZE(mc_tlb_types) ?
                        mc_tlb_types[evt->u.tlb_error.tlb_error_type]
                        : "Unknown";
                if (evt->u.tlb_error.effective_address_provided)
                        ea = evt->u.tlb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_USER:
                err_type = "User";
                subtype = evt->u.user_error.user_error_type <
                        ARRAY_SIZE(mc_user_types) ?
                        mc_user_types[evt->u.user_error.user_error_type]
                        : "Unknown";
                if (evt->u.user_error.effective_address_provided)
                        ea = evt->u.user_error.effective_address;
                break;
        case MCE_ERROR_TYPE_RA:
                err_type = "Real address";
                subtype = evt->u.ra_error.ra_error_type <
                        ARRAY_SIZE(mc_ra_types) ?
                        mc_ra_types[evt->u.ra_error.ra_error_type]
                        : "Unknown";
                if (evt->u.ra_error.effective_address_provided)
                        ea = evt->u.ra_error.effective_address;
                break;
        case MCE_ERROR_TYPE_LINK:
                err_type = "Link";
                subtype = evt->u.link_error.link_error_type <
                        ARRAY_SIZE(mc_link_types) ?
                        mc_link_types[evt->u.link_error.link_error_type]
                        : "Unknown";
                if (evt->u.link_error.effective_address_provided)
                        ea = evt->u.link_error.effective_address;
                break;
        case MCE_ERROR_TYPE_DCACHE:
                err_type = "D-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_ICACHE:
                err_type = "I-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                err_type = "Unknown";
                subtype = "";
                break;
        }
        dar_str[0] = pa_str[0] = '\0';
        if (ea && evt->srr0 != ea) {
                /* Load/Store address */
                n = sprintf(dar_str, "DAR: %016llx ", ea);
                if (pa)
                        sprintf(dar_str + n, "paddr: %016llx ", pa);
        } else if (pa) {
                sprintf(pa_str, " paddr: %016llx", pa);
        }

        printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
               level, evt->cpu, sevstr, in_guest ? "Guest" : "",
               err_type, subtype, dar_str,
               evt->disposition == MCE_DISPOSITION_RECOVERED ?
               "Recovered" : "Not recovered");

        if (in_guest || user_mode) {
                printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
                       level, evt->cpu, current->pid, current->comm,
                       in_guest ? "Guest " : "", evt->srr0, pa_str);
        } else {
                printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
                       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
        }

        printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

        subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
                mc_error_class[evt->error_class] : "Unknown";
        printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
        /* Display faulty slb contents for SLB errors. */
        if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
                slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
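/*
 * For reference, a UE handled by the code above prints console output
 * roughly of this shape (all values illustrative, not from a real log):
 *
 *	MCE: CPU0: machine check (Severe)  UE Load/Store DAR: d00007fe00000000 [Recovered]
 *	MCE: CPU0: NIP: [c000000000056780] memcpy+0x40/0x90
 *	MCE: CPU0: Initiator CPU
 *	MCE: CPU0: Hardware error
 */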
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
        long handled = 0;
        u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

        this_cpu_set_ftrace_enabled(0);
        /* Do not use nmi_enter/exit for pseries hpte guest */
        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
                nmi_enter();

        hv_nmi_check_nonrecoverable(regs);

        /*
         * See if platform is capable of handling machine check.
         */
        if (ppc_md.machine_check_early)
                handled = ppc_md.machine_check_early(regs);

        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
                nmi_exit();

        this_cpu_set_ftrace_enabled(ftrace_enabled);

        return handled;
}
/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
        DTRIG_UNKNOWN,
        DTRIG_VECTOR_CI,        /* need to emulate vector CI load instr */
        DTRIG_SUSPEND_ESCAPE,   /* need to escape from TM suspend mode */
} hmer_debug_trig_function;
static int init_debug_trig_function(void)
{
        int pvr;
        struct device_node *cpun;
        struct property *prop = NULL;
        const char *str;

        /* First look in the device tree */
        preempt_disable();
        cpun = of_get_cpu_node(smp_processor_id(), NULL);
        if (cpun) {
                of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
                                            prop, str) {
                        if (strcmp(str, "bit17-vector-ci-load") == 0)
                                hmer_debug_trig_function = DTRIG_VECTOR_CI;
                        else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
                                hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                }
                of_node_put(cpun);
        }
        preempt_enable();

        /* If we found the property, don't look at PVR */
        if (prop)
                goto out;

        pvr = mfspr(SPRN_PVR);
        /* Check for POWER9 Nimbus (scale-out) */
        if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
                /* DD2.2 and later */
                if ((pvr & 0xfff) >= 0x202)
                        hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                /* DD2.0 and DD2.1 - used for vector CI load emulation */
                else if ((pvr & 0xfff) >= 0x200)
                        hmer_debug_trig_function = DTRIG_VECTOR_CI;
        }

 out:
        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                pr_debug("HMI debug trigger used for vector CI load\n");
                break;
        case DTRIG_SUSPEND_ESCAPE:
                pr_debug("HMI debug trigger used for TM suspend escape\n");
                break;
        default:
                break;
        }
        return 0;
}
__initcall(init_debug_trig_function);
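/*
 * Example of the PVR decoding above: PVR_VER() yields the processor family
 * from the upper half of the PVR, and the low 12 bits hold the DD revision,
 * so (pvr & 0xfff) == 0x202 reads as DD2.2 and selects the TM suspend
 * escape workaround, while 0x200/0x201 (DD2.0/DD2.1) select vector CI load
 * emulation instead.
 */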
/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
        unsigned long hmer = mfspr(SPRN_HMER);
        long ret = 0;

        /* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
        if (!((hmer & HMER_DEBUG_TRIG)
              && hmer_debug_trig_function != DTRIG_UNKNOWN))
                return -1;

        hmer &= ~HMER_DEBUG_TRIG;
        /* HMER is a write-AND register */
        mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                /*
                 * Now to avoid problems with soft-disable we
                 * only do the emulation if we are coming from
                 * host user space.
                 */
                if (regs && user_mode(regs))
                        ret = local_paca->hmi_p9_special_emu = 1;

                break;

        default:
                break;
        }

        /*
         * See if any other HMI causes remain to be handled.
         */
        if (hmer & mfspr(SPRN_HMEER))
                return -1;

        return ret;
}
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
        int ret;

        local_paca->hmi_irqs++;

        ret = hmi_handle_debugtrig(regs);
        if (ret >= 0)
                return ret;

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 1;
}
void __init mce_init(void)
{
        struct mce_info *mce_info;
        u64 limit;
        int i;

        limit = min(ppc64_bolted_size(), ppc64_rma_size);
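        /*
         * Rationale for the bound above: the early machine check path runs
         * in real mode and dereferences paca->mce_info, so each buffer is
         * placed below min(bolted SLB size, RMA size) to keep it addressable
         * with translation off.
         */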
        for_each_possible_cpu(i) {
                mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
                                                  __alignof__(*mce_info),
                                                  MEMBLOCK_LOW_LIMIT,
                                                  limit, cpu_to_node(i));
                if (!mce_info)
                        goto err;
                paca_ptrs[i]->mce_info = mce_info;
        }
        return;
err:
        panic("Failed to allocate memory for MCE event data\n");
}