// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
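
/*
 * Overview of the tracked state: stack_dump_trace[] holds the backtrace
 * of the deepest stack usage seen so far, stack_trace_index[i] holds how
 * many bytes of stack were in use at entry i, and stack_trace_max_size
 * is the overall record.  Updates are serialized by stack_trace_max_lock,
 * while the per-CPU disable_stack_tracer counter keeps the tracer from
 * recursing into itself while it measures a stack.
 */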

void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}
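
/*
 * Illustrative example of the table printed above (the symbols and
 * numbers are made up; real output depends on the running kernel):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     3088      80   _raw_spin_lock+0x1b/0x30
 *   1)     3008     336   vfs_read+0x9e/0x160
 *   2)     2672    2672   SyS_read+0x55/0xb0
 *
 * "Depth" is how much stack was in use at that frame, "Size" is the
 * difference to the next entry, and the last entry's size covers
 * everything below it.
 */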

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may for some
	 * reason not be found on the stack, so we have to account
	 * for that. If they are all there, this loop will only
	 * iterate once. This code only runs when a new max is hit,
	 * so it is far from a fast path.
	 */
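	/*
	 * How the numbers are derived: when a saved return address is
	 * found on the stack at location p, the depth recorded for it
	 * is (top - p) * sizeof(unsigned long), i.e. how many bytes of
	 * stack lie between that word and the top of the thread stack.
	 * The per-frame "Size" column shown later is simply the
	 * difference between two adjacent depths.
	 */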
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, this variable is only modified from this CPU */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
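
/*
 * Note on the guard above: disable_stack_tracer acts as a per-CPU
 * recursion counter.  Functions called from check_stack() are themselves
 * traceable, so without the counter this callback could re-enter itself;
 * any nested invocation sees a value other than 1 and bails out.  ip is
 * bumped past the mcount/fentry call site so that check_stack() can
 * match the traced function's entry in the saved backtrace and discard
 * the tracer's own frames above it.
 */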

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we traced a function inside arch_spin_lock() (or from an
	 * NMI arriving after it), we would deadlock on the lock we
	 * already hold, so also bump the per-CPU disable_stack_tracer
	 * here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
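
/*
 * Sketch of typical use from the shell (the exact path depends on where
 * tracefs/debugfs is mounted; /sys/kernel/debug/tracing is common):
 *
 *   # cat stack_max_size        largest stack usage recorded, in bytes
 *   # echo 0 > stack_max_size   reset the recorded maximum
 */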

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
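
/*
 * seq_file glue: t_start() disables interrupts, bumps the per-CPU
 * recursion counter and takes stack_trace_max_lock so the snapshot
 * cannot change while it is being printed; t_stop() undoes all of that.
 * Position 0 yields SEQ_START_TOKEN so t_show() can emit the header,
 * and every later position maps to index *pos - 1 in stack_dump_trace[]
 * via __next().
 */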

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
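
/*
 * Reading "stack_trace" in the tracing directory dumps the same table
 * that stack_trace_print() emits, via the seq_file iterator above, e.g.:
 *
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */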

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */
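
/*
 * "stack_trace_filter" takes the usual ftrace filter syntax and limits
 * which functions trigger the stack check, for example (vfs_read here
 * is just an illustration):
 *
 *   # echo vfs_read > stack_trace_filter
 *
 * The stack is then only measured when the listed functions are hit,
 * which cuts the overhead of running the stack tracer.
 */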

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
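
/*
 * This is the handler behind /proc/sys/kernel/stack_tracer_enabled:
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled   start tracing
 *   # echo 0 > /proc/sys/kernel/stack_tracer_enabled   stop tracing
 *
 * The ftrace callback is only (un)registered when the value actually
 * changes, so writing the current value again is a no-op.
 */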

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
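
/*
 * Boot-time control: __setup() matches on the "stacktrace" prefix, so
 * both forms below land in enable_stacktrace(); in the second form str
 * starts with "_filter=" and the function list is stashed until
 * stack_trace_init() can hand it to ftrace_set_early_filter().
 *
 *   stacktrace
 *   stacktrace_filter=vfs_read,vfs_write
 *
 * (The function names above are just an example.)
 */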

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);