// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

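/*
 * Overview of the event flow in this file (summary added for orientation,
 * derived from the code below):
 *
 *   machine_check_early() (real mode)
 *     -> ppc_md.machine_check_early() -> save_mce_event()
 *        fills the per-cpu mce_event[] "nest" stack
 *   machine_check_queue_event()
 *     -> copies the current event into mce_event_queue[] and raises
 *        mce_event_process_work, which prints it from irq_work context
 *   UE events additionally go through mce_ue_event_queue[]:
 *     irq_work -> schedule_work() -> machine_process_ue_event(), which
 *     may call memory_failure() from sleepable (process) context.
 */
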
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into the per-cpu buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
}

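/*
 * Note: save_mce_event() runs inside the machine check handler itself
 * (possibly in real mode), so it only writes per-cpu state and takes no
 * locks; nested machine checks are handled by the mce_nest_count index.
 */
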
/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by the platform specific machine check
 * handler routine and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}

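/*
 * Illustrative usage sketch (not code from this file, and assuming the
 * MCE_EVENT_DONTRELEASE/MCE_EVENT_RELEASE flags from asm/mce.h): a
 * platform handler can peek at the current event and release the slot
 * only once it is done with it:
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		...inspect evt...
 *		release_mce_event();
 *	}
 */
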
static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE UE event which then can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

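/*
 * Two queues, two contexts: mce_event_queue is drained directly from
 * irq_work (printing is safe there), while UE events are bounced from
 * irq_work to a workqueue because memory_failure() needs a sleepable
 * (process) context.
 */
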
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

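/*
 * For example, if the UE hit inside a user-access routine that has an
 * exception table entry, nip is redirected to the fixup code so the
 * access fails gracefully (e.g. with -EFAULT) instead of being fatal,
 * and ignore_event suppresses the usual MCE reporting in the
 * queue-processing paths below.
 */
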
/*
 * Process pending MCE UE events from the per-cpu UE event queue. This
 * runs from a workqueue (process context), so memory_failure() below is
 * allowed to sleep.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh! well
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event, it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				__this_cpu_dec(mce_ue_count);
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else {
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was triggered\n");
			}
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

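/*
 * memory_failure() (CONFIG_MEMORY_FAILURE) takes a page frame number,
 * hence the physical_address >> PAGE_SHIFT conversion above; it attempts
 * to isolate/poison the affected page and kill affected users if needed.
 */
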
/*
 * Process pending MCE events from the per-cpu MCE event queue. This runs
 * from irq_work context, i.e. shortly after the machine check itself,
 * once interrupts are enabled again.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			__this_cpu_dec(mce_queue_count);
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
	       level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
	       err_type, subtype, dar_str,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
		       level, evt->cpu, current->pid, current->comm,
		       in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
		       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

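/*
 * Illustrative example of the resulting console output (all values made
 * up, symbol name hypothetical):
 *
 *   MCE: CPU0: machine check (Severe) Host UE Load/Store DAR: 00007fff8a061000 [Not recovered]
 *   MCE: CPU0: NIP: [c000000000053f20] some_kernel_function+0x40/0x190
 *   MCE: CPU0: Initiator CPU
 *   MCE: CPU0: Hardware error
 */
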
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function =
					DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/*
	 * HMER is a write-AND register: writing a 0 clears a bit while a 1
	 * leaves it alone, so this clears only the HMER_DEBUG_TRIG bit.
	 */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;
		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	long ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}