/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);
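
/*
 * A minimal, illustrative sketch of using this API (print_entry() and
 * show_backtrace() are hypothetical names used only for illustration):
 * initialise a struct stackframe with start_backtrace(), then either step it
 * one record at a time with unwind_frame(), or let walk_stackframe() invoke a
 * callback for each frame until the callback returns false or the unwind
 * terminates:
 *
 *	static bool print_entry(void *data, unsigned long pc)
 *	{
 *		printk("%pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	static void show_backtrace(struct pt_regs *regs)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame, regs->regs[29], regs->pc);
 *		walk_stackframe(current, &frame, print_entry, NULL);
 *	}
 */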

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

	/* The "sp + size < sp" term rejects ranges which wrap around. */
	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}
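
/*
 * A simplified, illustrative sketch of how an unwind step can combine
 * on_accessible_stack() with the struct stackframe accounting fields (the
 * real checks live in the unwinder implementation and may differ in detail):
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, frame->fp, 16, &info))
 *		return -EINVAL;
 *	if (test_bit(info.type, frame->stacks_done))
 *		return -EINVAL;
 *	if (info.type == frame->prev_type) {
 *		if (frame->fp <= frame->prev_fp)
 *			return -EINVAL;
 *	} else {
 *		set_bit(frame->prev_type, frame->stacks_done);
 *	}
 *
 * Within one stack, each new frame record must be at a strictly higher
 * address than the previous one; on a transition to a new stack, the old
 * stack is marked done so it is never unwound back onto.
 */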

void start_backtrace(struct stackframe *frame, unsigned long fp,
		     unsigned long pc);

#endif	/* __ASM_STACKTRACE_H */