// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
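/*
 * Usage note (illustrative, not in the original source): with the "#if 0"
 * above flipped to "#if 1", a call such as
 *
 *	TM_DEBUG("trap at 0x%lx, MSR=%lx\n", regs->nip, regs->msr);
 *
 * prints at KERN_INFO level; in the default build it compiles away to an
 * empty statement, so the TM handlers below can call it unconditionally.
 */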
static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}
/*
 * Trap & Exception support
 */
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
extern void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}
static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
static void oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
static char *get_mmu_str(void)
{
	if (early_radix_enabled())
		return " MMU=Radix";
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return " MMU=Hash";
	return "";
}
static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024, get_mmu_str(),
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);
void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}
static bool exception_common(int signr, struct pt_regs *regs, int code,
			      unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	return true;
}
void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr);
}
/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
	    nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
	    nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
#endif
}
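
/*
 * Worked example (illustrative only): suppose a system reset NMI lands
 * while an HSRR interrupt, e.g. the hypervisor decrementer around 0x980,
 * is still using HSPRG1 with MSR[RI]=1. A virt-mode nip such as
 * 0xc000000000004990 masked with ~0xc000000000000000ULL gives 0x4990,
 * which falls inside the [0x4980, 0x4a00) window above, so MSR[RI] is
 * cleared and the context is treated as non-recoverable rather than
 * risking silent HSPRG1 corruption.
 */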
void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool saved_hsrrs = false;
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);

	nmi_enter();

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		die("Unrecoverable System Reset", regs, SIGABRT);

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	/* What should we do here? We could issue a shutdown or hard reset. */
}
NOKPROBE_SYMBOL(system_reset_exception);
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR
#define REASON_PREFIXED		0
#define REASON_BOUNDARY		0

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while(0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP
#define REASON_PREFIXED		SRR1_PREFIXED
#define REASON_BOUNDARY		SRR1_BOUNDARY

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif

#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)
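
/*
 * Note (illustrative): ISA 3.1 prefixed instructions are 8 bytes long
 * (a 4-byte prefix followed by a 4-byte suffix), so when REASON_PREFIXED
 * is set the "skip over emulated instruction" paths below must advance
 * regs->nip by 8 rather than 4, which is what inst_length() computes.
 */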
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void die_mce(const char *str, struct pt_regs *regs, long err)
{
	/*
	 * The machine check wants to kill the interrupted context, but
	 * do_exit() checks for in_interrupt() and panics in that case, so
	 * exit the irq/nmi before calling die.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		nmi_exit();
	die(str, regs, err);
}
NOKPROBE_SYMBOL(die_mce);
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	/*
	 * BOOK3S_64 does not call this handler as a non-maskable interrupt
	 * (it uses its own early real-mode handler to handle the MCE proper
	 * and then raises irq_work to call this handler when interrupts are
	 * enabled).
	 *
	 * This is silly. The BOOK3S_64 should just call a different function
	 * rather than expecting semantics to magically change. Something
	 * like 'non_nmi_machine_check_exception()', perhaps?
	 */
	const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);

	if (nmi) nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die_mce("Machine check", regs, SIGBUS);

bail:
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		die_mce("Unrecoverable Machine check", regs, SIGBUS);

	if (nmi) nmi_exit();
}
NOKPROBE_SYMBOL(machine_check_exception);
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16] __aligned(16), *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */
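
/*
 * Opcode-mask sanity check for p9_hmi_special_emu() above (illustrative):
 * the four cache-inhibited vector loads differ only in bits 6:7 (the
 * "sel" element-size field) and bit 0 (TX, selecting VSRs 32-63):
 *
 *	lxvw4x	0x7c000618	sel = 0
 *	lxvh8x	0x7c000658	sel = 1
 *	lxvd2x	0x7c000698	sel = 2
 *	lxvb16x	0x7c0006d8	sel = 3
 *
 * The mask 0xfc00073e keeps the primary opcode and the rest of the
 * extended opcode while ignoring the T/A/B register fields, sel and TX,
 * so the single compare against 0x7c000618 matches all four forms.
 */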
void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}
void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}

void unknown_async_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
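
/*
 * Example (illustrative): a float divide by zero taken with the
 * corresponding trap enabled sets both FPSCR_ZE (the enable bit) and
 * FPSCR_ZX (the sticky status bit), so __parse_fpscr() returns
 * FPE_FLTDIV, which parse_fpe() below then delivers as the si_code of
 * the resulting SIGFPE.
 */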
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

#ifdef CONFIG_PPC_FPU_REGS
	code = __parse_fpscr(current->thread.fp_state.fpscr);
#endif

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
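
/*
 * Example (illustrative): "lswi r5,r4,6" loads 6 bytes starting at the
 * address in r4; the first 4 bytes fill r5 and the remaining 2 land
 * left-justified in r6, whose lower bytes stay zero because each target
 * register is cleared above when pos wraps back to 0.
 */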
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
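
/*
 * Worked example of the SWAR (SIMD-within-a-register) steps above, for a
 * single source byte 0xb7 (0b10110111, six set bits):
 *
 *	tmp - ((tmp >> 1) & 0x55..)            -> 0x66 (2-bit sums 1,2,1,2)
 *	(tmp & 0x33..) + ((tmp >> 2) & 0x33..) -> 0x33 (nibble sums 3,3)
 *	(tmp + (tmp >> 4)) & 0x0f..            -> 0x06 (per-byte count)
 *
 * Each result byte ends up holding the number of set bits in the
 * corresponding source byte, which is what popcntb architects.
 */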
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
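
/*
 * Example (illustrative): "isel r3,r4,r5,0" tests CR bit 0 (cr0 "LT",
 * the most significant bit of regs->ccr, hence the 31 - BC shift above)
 * and sets r3 = r4 if it is set, r3 = r5 otherwise. As in many Power
 * addressing forms, rA = 0 reads as the literal value 0 rather than r0.
 */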
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
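
/*
 * Field-decode example for the mcrxr emulation above (illustrative):
 * for "mcrxr cr1" the BF field yields shift = 4, so msk = 0x0f000000
 * selects CR field 1; XER[SO,OV,CA] are copied there and then cleared
 * from the XER, matching the architected behaviour of mcrxr.
 */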
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;
	unsigned long reason;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	reason = get_reason(regs);

	if (reason & REASON_BOUNDARY) {
		sig = SIGBUS;
		code = BUS_ADRALN;
		goto bad;
	}

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		/* skip over emulated instruction */
		regs->nip += inst_length(reason);
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
bad:
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, sig);

bail:
	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
		current->comm, task_pid_nr(current), regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
void stack_overflow_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	die("Kernel stack overflow", regs, SIGSEGV);

	exception_exit(prev_state);
}
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
		[FSCR_PREFIX_LG] = "PREFIX",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We should not have taken this interrupt in kernel */
	if (!user_mode(regs)) {
		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
			 facility, status, regs->nip);
		die("Unexpected facility unavailable exception", regs, SIGABRT);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
		}
		regs->nip += 4;
		emulate_single_step(regs);
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR[TM] set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	/*
	 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
	 * then they were overwritten by the thr->fp_state by tm_reclaim_thread().
	 *
	 * At this point, ck{fp,vr}_state contains the exact values we want to
	 * recheckpoint.
	 */

	/* Enable FP for the task: */
	current->thread.load_fp = 1;

	/*
	 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
	 */
	tm_recheckpoint(&current->thread);
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;

	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static void performance_monitor_exception_nmi(struct pt_regs *regs)
{
	nmi_enter();

	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);

	nmi_exit();
}

static void performance_monitor_exception_async(struct pt_regs *regs)
{
	irq_enter();

	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);

	irq_exit();
}

void performance_monitor_exception(struct pt_regs *regs)
{
	/*
	 * On 64-bit, if perf interrupts hit in a local_irq_disable
	 * (soft-masked) region, we consider them as NMIs. This is required to
	 * prevent hash faults on user addresses when reading callchains (and
	 * looks better from an irq tracing perspective).
	 */
	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
		performance_monitor_exception_nmi(regs);
	else
		performance_monitor_exception_async(regs);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
			     5);
		changed |= 0x01;
	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
			     6);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
			     1);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
			     2);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
			     3);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
void DebugException(struct pt_regs *regs)
{
	unsigned long debug_status = regs->dsisr;

	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs)
{
	unsigned long error_code = regs->dsisr;

	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
	}

	return;
}
#endif /* CONFIG_SPE */
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
}
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);

	debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
		debugfs_create_u32(entries[i].name, 0644, dir,
				   (u32 *)&entries[i].val.counter);

	return 0;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */