/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
int panic_on_unrecovered_nmi;

static int die_counter;

static struct pt_regs exec_summary_regs;
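/*
 * in_task_stack() - check whether @stack points into @task's normal task
 * stack.  On a hit, describe that stack in @info (STACK_TYPE_TASK) and
 * return true; otherwise return false so the caller can try other stacks.
 */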
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
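/*
 * in_entry_stack() - same check as above, but against this CPU's entry
 * stack (the small per-CPU stack used by the syscall/exception entry code).
 */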
bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
/*
 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
 *
 * In the case where we don't have the exact kernel image (which, if we did, we
 * could simply disassemble and navigate to the RIP), the purpose of the bigger
 * prologue is to have more context and to be able to correlate the code from
 * the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus makes sense of the register dump.
 *
 * What is more, the additional complication of a variable-length insn arch like
 * x86 warrants having a longer byte sequence before rIP so that the disassembler
 * can "sync" up properly and find instruction boundaries when decoding the
 * opcode bytes.
 *
 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
 * guesstimate in an attempt to achieve all of the above.
 */
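/*
 * The resulting "Code:" line has the layout (illustrative only):
 *
 *   Code: <42 prologue bytes> <xx> <21 epilogue bytes>
 *
 * where <xx> is the opcode byte at the faulting RIP.
 */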
void show_opcodes(u8 *rip, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];

	if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
		printk("%sCode: Bad RIP value.\n", loglvl);
	} else {
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
	}
}
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes((u8 *)regs->ip, loglvl);
}
void show_iret_regs(struct pt_regs *regs)
{
	show_ip(regs, KERN_DEFAULT);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}
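/*
 * A "partial" pt_regs means only the iret frame (ip, cs, flags, sp, ss) has
 * been saved by the entry code; the rest of the structure is not yet valid.
 */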
static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}
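/*
 * Print a call trace for @task, walking the chain of stacks (task, irq,
 * exception, entry).  Addresses confirmed by the unwinder are printed as
 * reliable; everything else found by scanning the stack gets a '?' prefix.
 */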
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}
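/*
 * Illustrative (not captured from a real machine) output of the above:
 *
 *   Call Trace:
 *    <IRQ>
 *    some_function+0x1b/0x40
 *    ? scan_hit_function+0x12/0x30
 *    </IRQ>
 *
 * Lines without '?' were confirmed by the unwinder; '?' lines are scan hits.
 * The function names here are hypothetical.
 */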
void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}
void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
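/*
 * oops_begin()/oops_end() bracket an oops report.  They serialize concurrent
 * oopses on different CPUs via die_lock, allow the same CPU to nest, and
 * save/restore the caller's IRQ flags.
 */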
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
void __noreturn rewind_stack_do_exit(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, true);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
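/*
 * __die() prints the oops header line, e.g. (illustrative):
 *
 *   Oops: 0002 [#1] SMP PTI
 *
 * i.e. the reason string, the low 16 bits of the error code, the die counter
 * and the config/feature flags, followed by the registers and module list.
 */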
int __die(const char *str, struct pt_regs *regs, long err)
{
	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");

	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die);
/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}
void show_regs(struct pt_regs *regs)
{
	bool all = true;

	show_regs_print_info(KERN_DEFAULT);

	if (IS_ENABLED(CONFIG_X86_32))
		all = !user_mode(regs);

	__show_regs(regs, all);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}