// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
#include <linux/of.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

#include "setup.h"

static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
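
/*
 * Illustrative sketch (not part of this file): a subsystem that wants to
 * see UE events before memory_failure() runs can hook the chain like so.
 * The chain is invoked from machine_process_ue_event(), i.e. in process
 * context. The names below are hypothetical.
 *
 *	static int my_mce_cb(struct notifier_block *nb, unsigned long val,
 *			     void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		if (evt->error_type == MCE_ERROR_TYPE_UE)
 *			pr_info("UE seen on CPU %d\n", evt->cpu);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_mce_nb = { .notifier_call = my_mce_cb };
 *	...
 *	mce_register_notifier(&my_mce_nb);
 */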

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

void mce_irq_work_queue(void)
{
	/* Raise decrementer interrupt */
	arch_irq_work_raise();
	set_mce_pending_irq_work();
}
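
/*
 * Note: the machine check is taken in an NMI-like context where normal
 * interrupt processing is unsafe, so arch_irq_work_raise() is used to
 * latch a decrementer interrupt. The queued events are then consumed by
 * mce_run_irq_context_handlers() once regular interrupts run again.
 */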

/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structure.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = local_paca->mce_info->mce_nest_count++;
	struct machine_check_event *mce;

	mce = &local_paca->mce_info->mce_event[index];
	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);
	if (mce->error_type == MCE_ERROR_TYPE_UE)
		mce->u.ue_error.ignore_event = mce_err->ignore_event;

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}
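
/*
 * Note: for a UE with a known physical address, save_mce_event() also
 * pushes the event onto the per-CPU UE queue via machine_check_ue_event()
 * so that memory_failure() can poison the affected page from process
 * context later.
 */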

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 * return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform specific machine check
 * handler routines and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = local_paca->mce_info->mce_nest_count - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = &local_paca->mce_info->mce_event[index];
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		local_paca->mce_info->mce_nest_count--;

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
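
/*
 * Note: mce_nest_count effectively implements a small per-CPU stack of
 * in-flight events, so a machine check that nests inside another (before
 * the first event has been consumed) saves into the next slot rather than
 * overwriting the first.
 */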

static void machine_check_ue_work(void)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = local_paca->mce_info->mce_ue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_ue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
	       evt, sizeof(*evt));

	/* Queue work to process this event later. */
	mce_irq_work_queue();
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = local_paca->mce_info->mce_queue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_queue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_event_queue[index],
	       &evt, sizeof(evt));

	mce_irq_work_queue();
}
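
/*
 * Note the two per-CPU queues: mce_event_queue holds events waiting to be
 * printed by machine_check_process_queued_event(), while mce_ue_event_queue
 * holds UE events that machine_process_ue_event() later hands to the
 * notifier chain and memory_failure().
 */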

void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs_set_return_ip(regs, extable_fixup(entry));
	}
}
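
/*
 * If the faulting kernel NIP has an exception table entry (e.g. a
 * copy_from_user()-style accessor), the UE is treated like a failed
 * access: the NIP is redirected to the fixup stub and the event is
 * marked ignore_event so that later reporting and memory_failure()
 * are skipped for it.
 */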

/*
 * Process pending UE events from the MCE UE event queue. This runs from
 * the mce_ue_event_work workqueue item, i.e. in process context.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event, it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}

/*
 * Process pending MCE events from the MCE event queue. This is called
 * from irq work context, via mce_run_irq_context_handlers().
 */
static void machine_check_process_queued_event(void)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}

void set_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 1;
}

void clear_mce_pending_irq_work(void)
{
	local_paca->mce_pending_irq_work = 0;
}

void mce_run_irq_context_handlers(void)
{
	if (unlikely(local_paca->mce_pending_irq_work)) {
		if (ppc_md.machine_check_log_err)
			ppc_md.machine_check_log_err();
		machine_check_process_queued_event();
		machine_check_ue_work();
		clear_mce_pending_irq_work();
	}
}
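
/*
 * Flow summary: the real-mode handler saves an event and calls
 * mce_irq_work_queue(), which raises irq work and sets
 * mce_pending_irq_work. When interrupts are processed again this
 * function prints the queued events and schedules the UE work item.
 */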

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign/control memory)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign/control memory)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign/control memory)",
		"Load/Store (foreign/control memory)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
	       level, evt->cpu, sevstr, in_guest ? "Guest" : "",
	       err_type, subtype, dar_str,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
		       level, evt->cpu, current->pid, current->comm,
		       in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
		       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_64S_HASH_MMU
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
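
/*
 * Illustrative (hypothetical) console output from the printks above for
 * a recovered UE in the host kernel on CPU 0; addresses and symbol are
 * made up:
 *
 *	MCE: CPU0: machine check (Severe)  UE Load/Store DAR: c00000000a2b4560 [Recovered]
 *	MCE: CPU0: NIP: [c000000000123456] some_function+0x26/0xd0
 *	MCE: CPU0: Initiator CPU
 *	MCE: CPU0: Hardware error
 */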

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}
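
/*
 * The code below handles the HMI "debug trigger" workarounds. On POWER9,
 * firmware can wire up an HMER bit (bit 17, going by the device tree
 * strings below) as a debug trigger, repurposed to signal either a vector
 * CI load that needs emulating or a TM suspend mode that needs escaping.
 */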

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
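
/*
 * Note: clearing only HMER_DEBUG_TRIG (HMER is write-AND, so writing
 * ~HMER_DEBUG_TRIG preserves the other bits) lets the remaining causes
 * be checked against HMEER above; if an enabled cause is still pending,
 * -1 tells the caller to continue with full HMI handling.
 */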

DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}

void __init mce_init(void)
{
	struct mce_info *mce_info;
	u64 limit;
	int i;

	/* Allocate below the RMA so the buffers are addressable in real mode */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
						  __alignof__(*mce_info),
						  MEMBLOCK_LOW_LIMIT,
						  limit, cpu_to_node(i));
		if (!mce_info)
			goto err;
		paca_ptrs[i]->mce_info = mce_info;
	}
	return;
err:
	panic("Failed to allocate memory for MCE event data\n");
}