// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
#include <linux/of.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/asm-prototypes.h>

#include "setup.h"

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
        .func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
        .func = machine_check_ue_irq_work,
};

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
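
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem that
 * wants to observe the UE events fed through mce_notifier_list could register
 * a callback like this. The example_* names are hypothetical.
 *
 *	static int example_mce_cb(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		if (evt->error_type == MCE_ERROR_TYPE_UE)
 *			pr_info("UE event on CPU %d\n", evt->cpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_mce_nb = {
 *		.notifier_call = example_mce_cb,
 *	};
 *
 *	mce_register_notifier(&example_mce_nb);
 *
 * The chain is invoked from process context (see machine_process_ue_event()
 * below), so a blocking callback is permitted.
 */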

/* Propagate the error type and type-specific detail from mce_err into the event. */
static void mce_set_error_info(struct machine_check_event *mce,
                               struct mce_error_info *mce_err)
{
        mce->error_type = mce_err->error_type;
        switch (mce_err->error_type) {
        case MCE_ERROR_TYPE_UE:
                mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
                break;
        case MCE_ERROR_TYPE_SLB:
                mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
                break;
        case MCE_ERROR_TYPE_ERAT:
                mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
                break;
        case MCE_ERROR_TYPE_TLB:
                mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
                break;
        case MCE_ERROR_TYPE_USER:
                mce->u.user_error.user_error_type = mce_err->u.user_error_type;
                break;
        case MCE_ERROR_TYPE_RA:
                mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
                break;
        case MCE_ERROR_TYPE_LINK:
                mce->u.link_error.link_error_type = mce_err->u.link_error_type;
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                break;
        }
}

/*
 * Decode and save high level MCE information into the per-cpu buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
                    struct mce_error_info *mce_err,
                    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
        int index = local_paca->mce_info->mce_nest_count++;
        struct machine_check_event *mce;

        mce = &local_paca->mce_info->mce_event[index];

        /*
         * Return if we don't have enough space to log the MCE event.
         * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
         * the check below will stop buffer overrun.
         */
        if (index >= MAX_MC_EVT)
                return;

        /* Populate generic machine check info */
        mce->version = MCE_V1;
        mce->srr0 = nip;
        mce->srr1 = regs->msr;
        mce->gpr3 = regs->gpr[3];
        mce->in_use = 1;
        mce->cpu = get_paca()->paca_index;

        /* Mark it recovered if we have handled it and MSR(RI=1). */
        if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

        mce->initiator = mce_err->initiator;
        mce->severity = mce_err->severity;
        mce->sync_error = mce_err->sync_error;
        mce->error_class = mce_err->error_class;

        /*
         * Populate the mce error_type and type-specific error_type.
         */
        mce_set_error_info(mce, mce_err);
        if (mce->error_type == MCE_ERROR_TYPE_UE)
                mce->u.ue_error.ignore_event = mce_err->ignore_event;

        if (!addr)
                return;

        if (mce->error_type == MCE_ERROR_TYPE_TLB) {
                mce->u.tlb_error.effective_address_provided = true;
                mce->u.tlb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
                mce->u.slb_error.effective_address_provided = true;
                mce->u.slb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
                mce->u.erat_error.effective_address_provided = true;
                mce->u.erat_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
                mce->u.user_error.effective_address_provided = true;
                mce->u.user_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
                mce->u.ra_error.effective_address_provided = true;
                mce->u.ra_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
                mce->u.link_error.effective_address_provided = true;
                mce->u.link_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
                mce->u.ue_error.effective_address_provided = true;
                mce->u.ue_error.effective_address = addr;
                if (phys_addr != ULONG_MAX) {
                        mce->u.ue_error.physical_address_provided = true;
                        mce->u.ue_error.physical_address = phys_addr;
                        machine_check_ue_event(mce);
                }
        }
}
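
/*
 * Illustrative sketch (not taken from a real platform handler): a handler
 * that decoded, say, an SLB multihit could record it via save_mce_event()
 * above. The "handled" and "dar" locals here are hypothetical.
 *
 *	struct mce_error_info mce_err = {
 *		.error_type = MCE_ERROR_TYPE_SLB,
 *		.u.slb_error_type = MCE_SLB_ERROR_MULTIHIT,
 *	};
 *
 *	save_mce_event(regs, handled, &mce_err, regs->nip, dar, ULONG_MAX);
 *
 * Passing ULONG_MAX as phys_addr marks the physical address as not provided,
 * matching the check in save_mce_event().
 */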

/*
 * get_mce_event - Fetch the most recent machine check event.
 * mce		Pointer to machine_check_event structure to be filled.
 * release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once the event has been consumed.
 *		1 <= release the slot.
 *
 * return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handling routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
        int index = local_paca->mce_info->mce_nest_count - 1;
        struct machine_check_event *mc_evt;
        int ret = 0;

        /* Sanity check */
        if (index < 0)
                return ret;

        /* Check if we have MCE info to process. */
        if (index < MAX_MC_EVT) {
                mc_evt = &local_paca->mce_info->mce_event[index];
                /* Copy the event structure and release the original */
                if (mce)
                        *mce = *mc_evt;
                if (release)
                        mc_evt->in_use = 0;
                ret = 1;
        }
        /* Decrement the count to free the slot. */
        if (release)
                local_paca->mce_info->mce_nest_count--;

        return ret;
}

void release_mce_event(void)
{
        get_mce_event(NULL, true);
}
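
/*
 * Consumption sketch (illustrative only): a platform handler or KVM can
 * fetch the most recent event and free its slot in one call. Passing
 * MCE_EVENT_DONTRELEASE instead keeps the slot until a later
 * release_mce_event().
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_RELEASE)) {
 *		if (evt.disposition == MCE_DISPOSITION_RECOVERED)
 *			handle_recovered_event(&evt);	(hypothetical helper)
 *	}
 */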

static void machine_check_ue_irq_work(struct irq_work *work)
{
        schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event which can then be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
        int index;

        index = local_paca->mce_info->mce_ue_count++;
        /* If the queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                local_paca->mce_info->mce_ue_count--;
                return;
        }
        memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
               evt, sizeof(*evt));

        /* Queue work to process this event later. */
        irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which can then be handled later.
 */
void machine_check_queue_event(void)
{
        int index;
        struct machine_check_event evt;

        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;

        index = local_paca->mce_info->mce_queue_count++;
        /* If the queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                local_paca->mce_info->mce_queue_count--;
                return;
        }
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));

        /* Queue irq work to process this event later. */
        irq_work_queue(&mce_event_process_work);
}

void mce_common_process_ue(struct pt_regs *regs,
                           struct mce_error_info *mce_err)
{
        const struct exception_table_entry *entry;

        entry = search_kernel_exception_table(regs->nip);
        if (entry) {
                mce_err->ignore_event = true;
                regs_set_return_ip(regs, extable_fixup(entry));
        }
}

/*
 * Process pending UE MCE events from the per-CPU UE event queue. This runs
 * from the mce_ue_event_work workqueue item, scheduled via irq_work by
 * machine_check_ue_event().
 */
static void machine_process_ue_event(struct work_struct *work)
{
        int index;
        struct machine_check_event *evt;

        while (local_paca->mce_info->mce_ue_count > 0) {
                index = local_paca->mce_info->mce_ue_count - 1;
                evt = &local_paca->mce_info->mce_ue_event_queue[index];
                blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
                /*
                 * This should probably be queued elsewhere, but oh well.
                 *
                 * Don't report this machine check because the caller has
                 * asked us to ignore the event; it has a fixup handler which
                 * will do the appropriate error handling and reporting.
                 */
                if (evt->error_type == MCE_ERROR_TYPE_UE) {
                        if (evt->u.ue_error.ignore_event) {
                                local_paca->mce_info->mce_ue_count--;
                                continue;
                        }

                        if (evt->u.ue_error.physical_address_provided) {
                                unsigned long pfn;

                                pfn = evt->u.ue_error.physical_address >>
                                        PAGE_SHIFT;
                                memory_failure(pfn, 0);
                        } else {
                                pr_warn("Failed to identify the bad address from which the uncorrectable error (UE) was generated\n");
                        }
                }
#endif
                local_paca->mce_info->mce_ue_count--;
        }
}

/*
 * Process pending MCE events from the per-CPU MCE event queue. This runs as
 * irq_work, queued from machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
        int index;
        struct machine_check_event *evt;

        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

        /*
         * For now just print it to console.
         * TODO: log this error event to FSP or nvram.
         */
        while (local_paca->mce_info->mce_queue_count > 0) {
                index = local_paca->mce_info->mce_queue_count - 1;
                evt = &local_paca->mce_info->mce_event_queue[index];

                if (evt->error_type == MCE_ERROR_TYPE_UE &&
                    evt->u.ue_error.ignore_event) {
                        local_paca->mce_info->mce_queue_count--;
                        continue;
                }
                machine_check_print_event_info(evt, false, false);
                local_paca->mce_info->mce_queue_count--;
        }
}

void machine_check_print_event_info(struct machine_check_event *evt,
                                    bool user_mode, bool in_guest)
{
        const char *level, *sevstr, *subtype, *err_type, *initiator;
        uint64_t ea = 0, pa = 0;
        int n = 0;
        char dar_str[50];
        char pa_str[50];
        static const char *mc_ue_types[] = {
                "Indeterminate",
                "Instruction fetch",
                "Page table walk ifetch",
                "Load/Store",
                "Page table walk Load/Store",
        };
        static const char *mc_slb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_erat_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_tlb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_user_types[] = {
                "Indeterminate",
                "tlbie(l) invalid",
                "scv invalid",
        };
        static const char *mc_ra_types[] = {
                "Indeterminate",
                "Instruction fetch (bad)",
                "Instruction fetch (foreign)",
                "Page table walk ifetch (bad)",
                "Page table walk ifetch (foreign)",
                "Load (bad)",
                "Store (bad)",
                "Page table walk Load/Store (bad)",
                "Page table walk Load/Store (foreign)",
                "Load/Store (foreign)",
        };
        static const char *mc_link_types[] = {
                "Indeterminate",
                "Instruction fetch (timeout)",
                "Page table walk ifetch (timeout)",
                "Load (timeout)",
                "Store (timeout)",
                "Page table walk Load/Store (timeout)",
        };
        static const char *mc_error_class[] = {
                "Unknown",
                "Hardware error",
                "Probable Hardware error (some chance of software cause)",
                "Software error",
                "Probable Software error (some chance of hardware cause)",
        };

        /* Print things out */
        if (evt->version != MCE_V1) {
                pr_err("Machine Check Exception, Unknown event version %d!\n",
                       evt->version);
                return;
        }
        switch (evt->severity) {
        case MCE_SEV_NO_ERROR:
                level = KERN_INFO;
                sevstr = "Harmless";
                break;
        case MCE_SEV_WARNING:
                level = KERN_WARNING;
                sevstr = "Warning";
                break;
        case MCE_SEV_SEVERE:
                level = KERN_ERR;
                sevstr = "Severe";
                break;
        case MCE_SEV_FATAL:
        default:
                level = KERN_ERR;
                sevstr = "Fatal";
                break;
        }

        switch (evt->initiator) {
        case MCE_INITIATOR_CPU:
                initiator = "CPU";
                break;
        case MCE_INITIATOR_PCI:
                initiator = "PCI";
                break;
        case MCE_INITIATOR_ISA:
                initiator = "ISA";
                break;
        case MCE_INITIATOR_MEMORY:
                initiator = "Memory";
                break;
        case MCE_INITIATOR_POWERMGM:
                initiator = "Power Management";
                break;
        case MCE_INITIATOR_UNKNOWN:
        default:
                initiator = "Unknown";
                break;
        }

        switch (evt->error_type) {
        case MCE_ERROR_TYPE_UE:
                err_type = "UE";
                subtype = evt->u.ue_error.ue_error_type <
                        ARRAY_SIZE(mc_ue_types) ?
                        mc_ue_types[evt->u.ue_error.ue_error_type]
                        : "Unknown";
                if (evt->u.ue_error.effective_address_provided)
                        ea = evt->u.ue_error.effective_address;
                if (evt->u.ue_error.physical_address_provided)
                        pa = evt->u.ue_error.physical_address;
                break;
        case MCE_ERROR_TYPE_SLB:
                err_type = "SLB";
                subtype = evt->u.slb_error.slb_error_type <
                        ARRAY_SIZE(mc_slb_types) ?
                        mc_slb_types[evt->u.slb_error.slb_error_type]
                        : "Unknown";
                if (evt->u.slb_error.effective_address_provided)
                        ea = evt->u.slb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_ERAT:
                err_type = "ERAT";
                subtype = evt->u.erat_error.erat_error_type <
                        ARRAY_SIZE(mc_erat_types) ?
                        mc_erat_types[evt->u.erat_error.erat_error_type]
                        : "Unknown";
                if (evt->u.erat_error.effective_address_provided)
                        ea = evt->u.erat_error.effective_address;
                break;
        case MCE_ERROR_TYPE_TLB:
                err_type = "TLB";
                subtype = evt->u.tlb_error.tlb_error_type <
                        ARRAY_SIZE(mc_tlb_types) ?
                        mc_tlb_types[evt->u.tlb_error.tlb_error_type]
                        : "Unknown";
                if (evt->u.tlb_error.effective_address_provided)
                        ea = evt->u.tlb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_USER:
                err_type = "User";
                subtype = evt->u.user_error.user_error_type <
                        ARRAY_SIZE(mc_user_types) ?
                        mc_user_types[evt->u.user_error.user_error_type]
                        : "Unknown";
                if (evt->u.user_error.effective_address_provided)
                        ea = evt->u.user_error.effective_address;
                break;
        case MCE_ERROR_TYPE_RA:
                err_type = "Real address";
                subtype = evt->u.ra_error.ra_error_type <
                        ARRAY_SIZE(mc_ra_types) ?
                        mc_ra_types[evt->u.ra_error.ra_error_type]
                        : "Unknown";
                if (evt->u.ra_error.effective_address_provided)
                        ea = evt->u.ra_error.effective_address;
                break;
        case MCE_ERROR_TYPE_LINK:
                err_type = "Link";
                subtype = evt->u.link_error.link_error_type <
                        ARRAY_SIZE(mc_link_types) ?
                        mc_link_types[evt->u.link_error.link_error_type]
                        : "Unknown";
                if (evt->u.link_error.effective_address_provided)
                        ea = evt->u.link_error.effective_address;
                break;
        case MCE_ERROR_TYPE_DCACHE:
                err_type = "D-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_ICACHE:
                err_type = "I-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
                err_type = "Unknown";
                subtype = "";
                break;
        }

        dar_str[0] = pa_str[0] = '\0';
        if (ea && evt->srr0 != ea) {
                /* Load/Store address */
                n = sprintf(dar_str, "DAR: %016llx ", ea);
                if (pa)
                        sprintf(dar_str + n, "paddr: %016llx ", pa);
        } else if (pa) {
                sprintf(pa_str, " paddr: %016llx", pa);
        }

        printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
               level, evt->cpu, sevstr, in_guest ? "Guest" : "",
               err_type, subtype, dar_str,
               evt->disposition == MCE_DISPOSITION_RECOVERED ?
               "Recovered" : "Not recovered");

        if (in_guest || user_mode) {
                printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
                       level, evt->cpu, current->pid, current->comm,
                       in_guest ? "Guest " : "", evt->srr0, pa_str);
        } else {
                printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
                       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
        }

        printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

        subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
                mc_error_class[evt->error_class] : "Unknown";
        printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
        /* Display faulty slb contents for SLB errors. */
        if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
                slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
        long handled = 0;

        hv_nmi_check_nonrecoverable(regs);

        /*
         * See if the platform is capable of handling the machine check.
         */
        if (ppc_md.machine_check_early)
                handled = ppc_md.machine_check_early(regs);

        return handled;
}

/* Possible meanings for the HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
        DTRIG_UNKNOWN,
        DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
        DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
        int pvr;
        struct device_node *cpun;
        struct property *prop = NULL;
        const char *str;

        /* First look in the device tree */
        preempt_disable();
        cpun = of_get_cpu_node(smp_processor_id(), NULL);
        if (cpun) {
                of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
                                            prop, str) {
                        if (strcmp(str, "bit17-vector-ci-load") == 0)
                                hmer_debug_trig_function = DTRIG_VECTOR_CI;
                        else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
                                hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                }
                of_node_put(cpun);
        }
        preempt_enable();

        /* If we found the property, don't look at the PVR */
        if (prop)
                goto out;

        pvr = mfspr(SPRN_PVR);
        /* Check for POWER9 Nimbus (scale-out) */
        if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
                /* DD2.2 and later */
                if ((pvr & 0xfff) >= 0x202)
                        hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                /* DD2.0 and DD2.1 - used for vector CI load emulation */
                else if ((pvr & 0xfff) >= 0x200)
                        hmer_debug_trig_function = DTRIG_VECTOR_CI;
        }

 out:
        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                pr_debug("HMI debug trigger used for vector CI load\n");
                break;
        case DTRIG_SUSPEND_ESCAPE:
                pr_debug("HMI debug trigger used for TM suspend escape\n");
                break;
        default:
                break;
        }

        return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
        unsigned long hmer = mfspr(SPRN_HMER);
        long ret = 0;

        /* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
        if (!((hmer & HMER_DEBUG_TRIG)
              && hmer_debug_trig_function != DTRIG_UNKNOWN))
                return -1;

        hmer &= ~HMER_DEBUG_TRIG;
        /* HMER is a write-AND register */
        mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                /*
                 * To avoid problems with soft-disable, we only do the
                 * emulation if we are coming from host user space.
                 */
                if (regs && user_mode(regs))
                        ret = local_paca->hmi_p9_special_emu = 1;

                break;

        default:
                break;
        }

        /*
         * See if any other HMI causes remain to be handled.
         */
        if (hmer & mfspr(SPRN_HMEER))
                return -1;

        return ret;
}

DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
        int ret;

        local_paca->hmi_irqs++;

        ret = hmi_handle_debugtrig(regs);
        if (ret >= 0)
                return ret;

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 1;
}

void __init mce_init(void)
{
        struct mce_info *mce_info;
        u64 limit;
        int i;

        limit = min(ppc64_bolted_size(), ppc64_rma_size);
        for_each_possible_cpu(i) {
                mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
                                                  __alignof__(*mce_info),
                                                  MEMBLOCK_LOW_LIMIT,
                                                  limit, cpu_to_node(i));
                if (!mce_info)
                        goto err;
                paca_ptrs[i]->mce_info = mce_info;
        }
        return;
err:
        panic("Failed to allocate memory for MCE event data\n");
}