1 // SPDX-License-Identifier: GPL-2.0-only
3 * thread-stack.c: Synthesize a thread's stack using call / return events
4 * Copyright (c) 2014, Intel Corporation.
7 #include <linux/rbtree.h>
8 #include <linux/list.h>
9 #include <linux/log2.h>
10 #include <linux/zalloc.h>
20 #include "call-path.h"
21 #include "thread-stack.h"
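/* Number of entries by which a thread's stack array grows each time it fills up */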
23 #define STACK_GROWTH 2048
26 * State of retpoline detection.
28 * RETPOLINE_NONE: no retpoline detection
29 * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
30 * X86_RETPOLINE_DETECTED: x86 retpoline detected
32 enum retpoline_state_t {
33 RETPOLINE_NONE,
34 X86_RETPOLINE_POSSIBLE,
35 X86_RETPOLINE_DETECTED,
36 };
39 * struct thread_stack_entry - thread stack entry.
40 * @ret_addr: return address
41 * @timestamp: timestamp (if known)
42 * @ref: external reference (e.g. db_id of sample)
43 * @branch_count: the branch count when the entry was created
44 * @insn_count: the instruction count when the entry was created
45 * @cyc_count: the cycle count when the entry was created
46 * @db_id: id used for db-export
48 * @no_call: a 'call' was not seen
49 * @trace_end: a 'call' but trace ended
50 * @non_call: a branch but not a 'call' to the start of a different symbol
52 struct thread_stack_entry {
67 * struct thread_stack - thread stack constructed from 'call' and 'return'
69 * @stack: array that holds the stack
70 * @cnt: number of entries in the stack
71 * @sz: current maximum stack size
72 * @trace_nr: current trace number
73 * @branch_count: running branch count
74 * @insn_count: running instruction count
75 * @cyc_count: running cycle count
76 * @kernel_start: kernel start address
77 * @last_time: last timestamp
78 * @crp: call/return processor
80 * @arr_sz: size of array if this is the first element of an array
81 * @rstate: used to detect retpolines
84 struct thread_stack_entry *stack;
93 struct call_return_processor *crp;
96 enum retpoline_state_t rstate;
100 * Assume pid == tid == 0 identifies the idle task as defined by
101 * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
102 * and therefore requires a stack for each cpu.
104 static inline bool thread_stack__per_cpu(struct thread *thread)
106 return !(thread->tid || thread->pid_);
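/*
 * Enlarge the array backing the thread stack by STACK_GROWTH entries,
 * preserving the existing entries.
 */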
109 static int thread_stack__grow(struct thread_stack *ts)
111 struct thread_stack_entry *new_stack;
114 new_sz = ts->sz + STACK_GROWTH;
115 sz = new_sz * sizeof(struct thread_stack_entry);
117 new_stack = realloc(ts->stack, sz);
121 ts->stack = new_stack;
127 static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
128 struct call_return_processor *crp)
132 err = thread_stack__grow(ts);
136 if (thread->mg && thread->mg->machine) {
137 struct machine *machine = thread->mg->machine;
138 const char *arch = perf_env__arch(machine->env);
140 ts->kernel_start = machine__kernel_start(machine);
141 if (!strcmp(arch, "x86"))
142 ts->rstate = X86_RETPOLINE_POSSIBLE;
144 ts->kernel_start = 1ULL << 63;
151 static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
152 struct call_return_processor *crp)
154 struct thread_stack *ts = thread->ts, *new_ts;
155 unsigned int old_sz = ts ? ts->arr_sz : 0;
156 unsigned int new_sz = 1;
158 if (thread_stack__per_cpu(thread) && cpu > 0)
159 new_sz = roundup_pow_of_two(cpu + 1);
161 if (!ts || new_sz > old_sz) {
162 new_ts = calloc(new_sz, sizeof(*ts));
166 memcpy(new_ts, ts, old_sz * sizeof(*ts));
167 new_ts->arr_sz = new_sz;
173 if (thread_stack__per_cpu(thread) && cpu > 0 &&
174 (unsigned int)cpu < ts->arr_sz)
178 thread_stack__init(ts, thread, crp))
184 static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
186 struct thread_stack *ts = thread->ts;
191 if (!ts || (unsigned int)cpu >= ts->arr_sz)
202 static inline struct thread_stack *thread__stack(struct thread *thread,
208 if (thread_stack__per_cpu(thread))
209 return thread__cpu_stack(thread, cpu);
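/*
 * Push a return address, growing the array first if it is full. On
 * allocation failure the existing entries are discarded rather than left in
 * an inconsistent state.
 */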
214 static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
219 if (ts->cnt == ts->sz) {
220 err = thread_stack__grow(ts);
222 pr_warning("Out of memory: discarding thread stack\n");
227 ts->stack[ts->cnt].trace_end = trace_end;
228 ts->stack[ts->cnt++].ret_addr = ret_addr;
233 static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
238 * In some cases there are functions which are not seen to return. For
239 * example, when setjmp / longjmp has been used, or when the perf context
240 * switch in the kernel does not stop and start tracing in exactly the
241 * same code path. When that happens the return address will be further
242 * down the stack. If the return address is not found at all, we assume
243 * the opposite (i.e. this is a return for a call that wasn't seen for
244 * some reason) and leave the stack alone.
246 for (i = ts->cnt; i; ) {
247 if (ts->stack[--i].ret_addr == ret_addr) {
254 static void thread_stack__pop_trace_end(struct thread_stack *ts)
258 for (i = ts->cnt; i; ) {
259 if (ts->stack[--i].trace_end)
266 static bool thread_stack__in_kernel(struct thread_stack *ts)
271 return ts->stack[ts->cnt - 1].cp->in_kernel;
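/*
 * Report one call/return pair to the call/return processor. The branch,
 * instruction and cycle counts are reported as deltas relative to the values
 * recorded when the entry at @idx was pushed.
 */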
274 static int thread_stack__call_return(struct thread *thread,
275 struct thread_stack *ts, size_t idx,
276 u64 timestamp, u64 ref, bool no_return)
278 struct call_return_processor *crp = ts->crp;
279 struct thread_stack_entry *tse;
280 struct call_return cr = {
287 tse = &ts->stack[idx];
289 cr.call_time = tse->timestamp;
290 cr.return_time = timestamp;
291 cr.branch_count = ts->branch_count - tse->branch_count;
292 cr.insn_count = ts->insn_count - tse->insn_count;
293 cr.cyc_count = ts->cyc_count - tse->cyc_count;
294 cr.db_id = tse->db_id;
295 cr.call_ref = tse->ref;
298 cr.flags |= CALL_RETURN_NO_CALL;
300 cr.flags |= CALL_RETURN_NO_RETURN;
302 cr.flags |= CALL_RETURN_NON_CALL;
305 * The parent db_id must be assigned before exporting the child. Note
306 * it is not possible to export the parent first: its information is not
307 * yet complete because its 'return' has not yet been processed.
309 parent_db_id = idx ? &(tse - 1)->db_id : NULL;
311 return crp->process(&cr, parent_db_id, crp->data);
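/*
 * Report and pop every remaining entry. No 'return' was seen for these
 * entries, so they are reported with no_return set, using the last known
 * timestamp.
 */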
314 static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
316 struct call_return_processor *crp = ts->crp;
325 err = thread_stack__call_return(thread, ts, --ts->cnt,
326 ts->last_time, 0, true);
328 pr_err("Error flushing thread stack!\n");
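/* Flush all of the thread's stacks (there is one per cpu for the idle task) */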
337 int thread_stack__flush(struct thread *thread)
339 struct thread_stack *ts = thread->ts;
344 for (pos = 0; pos < ts->arr_sz; pos++) {
345 int ret = __thread_stack__flush(thread, ts + pos);
355 int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
356 u64 to_ip, u16 insn_len, u64 trace_nr)
358 struct thread_stack *ts = thread__stack(thread, cpu);
364 ts = thread_stack__new(thread, cpu, NULL);
366 pr_warning("Out of memory: no thread stack\n");
369 ts->trace_nr = trace_nr;
373 * When the trace is discontinuous, the trace_nr changes. In that case
374 * the stack might be completely invalid. Better to report nothing than
375 * to report something misleading, so flush the stack.
377 if (trace_nr != ts->trace_nr) {
379 __thread_stack__flush(thread, ts);
380 ts->trace_nr = trace_nr;
383 /* Stop here if thread_stack__process() is in use */
387 if (flags & PERF_IP_FLAG_CALL) {
392 ret_addr = from_ip + insn_len;
393 if (ret_addr == to_ip)
394 return 0; /* Zero-length calls are excluded */
395 return thread_stack__push(ts, ret_addr,
396 flags & PERF_IP_FLAG_TRACE_END);
397 } else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
399 * If the caller did not change the trace number (which would
400 * have flushed the stack) then try to make sense of the stack.
401 * Possibly, tracing began after returning to the current
402 * address, so try to pop that. Also, do not expect a call made
403 * when the trace ended to return, so pop that.
405 thread_stack__pop(ts, to_ip);
406 thread_stack__pop_trace_end(ts);
407 } else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
408 thread_stack__pop(ts, to_ip);
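/*
 * Note a change of trace number. As in thread_stack__event(), a new trace
 * number means the stack may be invalid, so flush it.
 */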
414 void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
416 struct thread_stack *ts = thread__stack(thread, cpu);
421 if (trace_nr != ts->trace_nr) {
423 __thread_stack__flush(thread, ts);
424 ts->trace_nr = trace_nr;
428 static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
430 __thread_stack__flush(thread, ts);
434 static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
436 unsigned int arr_sz = ts->arr_sz;
438 __thread_stack__free(thread, ts);
439 memset(ts, 0, sizeof(*ts));
443 void thread_stack__free(struct thread *thread)
445 struct thread_stack *ts = thread->ts;
449 for (pos = 0; pos < ts->arr_sz; pos++)
450 __thread_stack__free(thread, ts + pos);
455 static inline u64 callchain_context(u64 ip, u64 kernel_start)
457 return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
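/*
 * Synthesize a callchain: the sample ip first, then the saved return
 * addresses from the top of the stack downwards, inserting a
 * PERF_CONTEXT_KERNEL / PERF_CONTEXT_USER marker whenever the addresses
 * cross the kernel boundary.
 */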
460 void thread_stack__sample(struct thread *thread, int cpu,
461 struct ip_callchain *chain,
462 size_t sz, u64 ip, u64 kernel_start)
464 struct thread_stack *ts = thread__stack(thread, cpu);
465 u64 context = callchain_context(ip, kernel_start);
474 chain->ips[0] = context;
482 last_context = context;
484 for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
485 ip = ts->stack[ts->cnt - j].ret_addr;
486 context = callchain_context(ip, kernel_start);
487 if (context != last_context) {
490 chain->ips[i++] = context;
491 last_context = context;
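/*
 * Minimal usage sketch (illustrative only: my_process() and my_data are
 * hypothetical caller-supplied names, not defined in this file):
 *
 *	static int my_process(struct call_return *cr, u64 *parent_db_id,
 *			      void *data)
 *	{
 *		... consume one call/return pair, e.g. export it ...
 *		return 0;
 *	}
 *
 *	struct call_return_processor *crp;
 *
 *	crp = call_return_processor__new(my_process, my_data);
 *	...
 *	thread_stack__process(thread, comm, sample, from_al, to_al, ref, crp);
 *	...
 *	call_return_processor__free(crp);
 */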
499 struct call_return_processor *
500 call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
503 struct call_return_processor *crp;
505 crp = zalloc(sizeof(struct call_return_processor));
508 crp->cpr = call_path_root__new();
511 crp->process = process;
520 void call_return_processor__free(struct call_return_processor *crp)
523 call_path_root__free(crp->cpr);
528 static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
529 u64 timestamp, u64 ref, struct call_path *cp,
530 bool no_call, bool trace_end)
532 struct thread_stack_entry *tse;
538 if (ts->cnt == ts->sz) {
539 err = thread_stack__grow(ts);
544 tse = &ts->stack[ts->cnt++];
545 tse->ret_addr = ret_addr;
546 tse->timestamp = timestamp;
548 tse->branch_count = ts->branch_count;
549 tse->insn_count = ts->insn_count;
550 tse->cyc_count = ts->cyc_count;
552 tse->no_call = no_call;
553 tse->trace_end = trace_end;
554 tse->non_call = false;
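/*
 * Match a 'return' against the stack. The common case is that the return
 * address matches the top entry; otherwise search down the stack, reporting
 * the entries above the match as calls that were not seen to return.
 */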
560 static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
561 u64 ret_addr, u64 timestamp, u64 ref,
570 struct thread_stack_entry *tse = &ts->stack[0];
572 if (tse->cp->sym == sym)
573 return thread_stack__call_return(thread, ts, --ts->cnt,
574 timestamp, ref, false);
577 if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
578 !ts->stack[ts->cnt - 1].non_call) {
579 return thread_stack__call_return(thread, ts, --ts->cnt,
580 timestamp, ref, false);
582 size_t i = ts->cnt - 1;
585 if (ts->stack[i].ret_addr != ret_addr ||
586 ts->stack[i].non_call)
589 while (ts->cnt > i) {
590 err = thread_stack__call_return(thread, ts,
597 return thread_stack__call_return(thread, ts, --ts->cnt,
598 timestamp, ref, false);
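/*
 * Put an initial entry on an empty stack, keyed on the sample's 'from'
 * address when there is one, otherwise on its 'to' address.
 */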
605 static int thread_stack__bottom(struct thread_stack *ts,
606 struct perf_sample *sample,
607 struct addr_location *from_al,
608 struct addr_location *to_al, u64 ref)
610 struct call_path_root *cpr = ts->crp->cpr;
611 struct call_path *cp;
618 } else if (sample->addr) {
625 cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
628 return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
632 static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
633 struct perf_sample *sample, u64 ref)
635 u64 tm = sample->time;
638 /* Return to userspace, so pop all kernel addresses */
639 while (thread_stack__in_kernel(ts)) {
640 err = thread_stack__call_return(thread, ts, --ts->cnt,
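/*
 * Handle a 'return' for which no matching 'call' was seen, for example
 * because tracing started inside the called function, or because the
 * 'return' is really being used as a jump (e.g. a retpoline).
 */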
649 static int thread_stack__no_call_return(struct thread *thread,
650 struct thread_stack *ts,
651 struct perf_sample *sample,
652 struct addr_location *from_al,
653 struct addr_location *to_al, u64 ref)
655 struct call_path_root *cpr = ts->crp->cpr;
656 struct call_path *root = &cpr->call_path;
657 struct symbol *fsym = from_al->sym;
658 struct symbol *tsym = to_al->sym;
659 struct call_path *cp, *parent;
660 u64 ks = ts->kernel_start;
661 u64 addr = sample->addr;
662 u64 tm = sample->time;
666 if (ip >= ks && addr < ks) {
667 /* Return to userspace, so pop all kernel addresses */
668 err = thread_stack__pop_ks(thread, ts, sample, ref);
672 /* If the stack is empty, push the userspace address */
674 cp = call_path__findnew(cpr, root, tsym, addr, ks);
675 return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
678 } else if (thread_stack__in_kernel(ts) && ip < ks) {
679 /* Return to userspace, so pop all kernel addresses */
680 err = thread_stack__pop_ks(thread, ts, sample, ref);
686 parent = ts->stack[ts->cnt - 1].cp;
690 if (parent->sym == from_al->sym) {
692 * At the bottom of the stack, assume the missing 'call' was
693 * before the trace started. So, pop the current symbol and push
694 * the 'to' symbol.
695 */
697 err = thread_stack__call_return(thread, ts, --ts->cnt,
704 cp = call_path__findnew(cpr, root, tsym, addr, ks);
706 return thread_stack__push_cp(ts, addr, tm, ref, cp,
711 * Otherwise assume the 'return' is being used as a jump (e.g.
712 * retpoline) and just push the 'to' symbol.
714 cp = call_path__findnew(cpr, parent, tsym, addr, ks);
716 err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
718 ts->stack[ts->cnt - 1].non_call = true;
724 * Assume 'parent' has not yet returned, so push 'to', and then push and
725 * pop 'from'.
726 */
728 cp = call_path__findnew(cpr, parent, tsym, addr, ks);
730 err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
734 cp = call_path__findnew(cpr, cp, fsym, ip, ks);
736 err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
740 return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
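/*
 * On a 'trace begin' branch, report a return for a top-of-stack entry that
 * was pushed when a previous trace ended.
 */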
743 static int thread_stack__trace_begin(struct thread *thread,
744 struct thread_stack *ts, u64 timestamp,
747 struct thread_stack_entry *tse;
754 tse = &ts->stack[ts->cnt - 1];
755 if (tse->trace_end) {
756 err = thread_stack__call_return(thread, ts, --ts->cnt,
757 timestamp, ref, false);
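/*
 * On a 'trace end' branch, push an entry for the unknown code executed while
 * tracing is stopped, so that a matching return can still be recognized if
 * tracing resumes.
 */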
765 static int thread_stack__trace_end(struct thread_stack *ts,
766 struct perf_sample *sample, u64 ref)
768 struct call_path_root *cpr = ts->crp->cpr;
769 struct call_path *cp;
772 /* No point having 'trace end' on the bottom of the stack */
773 if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
776 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
779 ret_addr = sample->ip + sample->insn_len;
781 return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
785 static bool is_x86_retpoline(const char *name)
787 const char *p = strstr(name, "__x86_indirect_thunk_");
789 return p == name || !strcmp(name, "__indirect_thunk_start");
793 * x86 retpoline functions pollute the call graph. This function removes them.
794 * This does not handle function return thunks, nor is there any improvement
795 * for the handling of inline thunks or extern thunks.
797 static int thread_stack__x86_retpoline(struct thread_stack *ts,
798 struct perf_sample *sample,
799 struct addr_location *to_al)
801 struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
802 struct call_path_root *cpr = ts->crp->cpr;
803 struct symbol *sym = tse->cp->sym;
804 struct symbol *tsym = to_al->sym;
805 struct call_path *cp;
807 if (sym && is_x86_retpoline(sym->name)) {
809 * This is an x86 retpoline function. It pollutes the call graph by
810 * showing up everywhere there is an indirect branch, but does
811 * not itself mean anything. Here the top-of-stack is removed,
812 * by decrementing the stack count, and then further down, the
813 * resulting top-of-stack is replaced with the actual target.
814 * The result is that the retpoline functions will no longer
815 * appear in the call graph. Note this only affects the call
816 * graph, since all the original branches are left unchanged.
819 sym = ts->stack[ts->cnt - 2].cp->sym;
820 if (sym && sym == tsym && to_al->addr != tsym->start) {
822 * Target is back to the middle of the symbol we came
823 * from so assume it is an indirect jmp and forget it
824 * altogether.
825 */
829 } else if (sym && sym == tsym) {
831 * Target is back to the symbol we came from so assume it is an
832 * indirect jmp and forget it altogether.
838 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
839 sample->addr, ts->kernel_start);
843 /* Replace the top-of-stack with the actual target */
844 ts->stack[ts->cnt - 1].cp = cp;
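/*
 * Process one branch sample when a call/return processor is in use:
 * maintain the stack from 'call', 'return', 'trace begin', 'trace end' and
 * jump-to-symbol branches, and report completed call/return pairs.
 */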
849 int thread_stack__process(struct thread *thread, struct comm *comm,
850 struct perf_sample *sample,
851 struct addr_location *from_al,
852 struct addr_location *to_al, u64 ref,
853 struct call_return_processor *crp)
855 struct thread_stack *ts = thread__stack(thread, sample->cpu);
856 enum retpoline_state_t rstate;
859 if (ts && !ts->crp) {
860 /* Supersede thread_stack__event() */
861 thread_stack__reset(thread, ts);
866 ts = thread_stack__new(thread, sample->cpu, crp);
873 if (rstate == X86_RETPOLINE_DETECTED)
874 ts->rstate = X86_RETPOLINE_POSSIBLE;
876 /* Flush stack on exec */
877 if (ts->comm != comm && thread->pid_ == thread->tid) {
878 err = __thread_stack__flush(thread, ts);
884 /* If the stack is empty, put the current symbol on the stack */
886 err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
891 ts->branch_count += 1;
892 ts->insn_count += sample->insn_cnt;
893 ts->cyc_count += sample->cyc_cnt;
894 ts->last_time = sample->time;
896 if (sample->flags & PERF_IP_FLAG_CALL) {
897 bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
898 struct call_path_root *cpr = ts->crp->cpr;
899 struct call_path *cp;
902 if (!sample->ip || !sample->addr)
905 ret_addr = sample->ip + sample->insn_len;
906 if (ret_addr == sample->addr)
907 return 0; /* Zero-length calls are excluded */
909 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
910 to_al->sym, sample->addr,
912 err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
913 cp, false, trace_end);
916 * A call to the same symbol, but not to the start of the symbol,
917 * may be the start of an x86 retpoline.
919 if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
920 from_al->sym == to_al->sym &&
921 to_al->addr != to_al->sym->start)
922 ts->rstate = X86_RETPOLINE_DETECTED;
924 } else if (sample->flags & PERF_IP_FLAG_RETURN) {
926 u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
927 PERF_IP_FLAG_INTERRUPT;
929 if (!(sample->flags & return_from_kernel))
932 /* Pop kernel stack */
933 return thread_stack__pop_ks(thread, ts, sample, ref);
939 /* x86 retpoline 'return' doesn't match the stack */
940 if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
941 ts->stack[ts->cnt - 1].ret_addr != sample->addr)
942 return thread_stack__x86_retpoline(ts, sample, to_al);
944 err = thread_stack__pop_cp(thread, ts, sample->addr,
945 sample->time, ref, from_al->sym);
949 err = thread_stack__no_call_return(thread, ts, sample,
950 from_al, to_al, ref);
952 } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
953 err = thread_stack__trace_begin(thread, ts, sample->time, ref);
954 } else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
955 err = thread_stack__trace_end(ts, sample, ref);
956 } else if (sample->flags & PERF_IP_FLAG_BRANCH &&
957 from_al->sym != to_al->sym && to_al->sym &&
958 to_al->addr == to_al->sym->start) {
959 struct call_path_root *cpr = ts->crp->cpr;
960 struct call_path *cp;
963 * The compiler might optimize a call/ret combination by making
964 * it a jmp. Make that visible by recording on the stack a
965 * branch to the start of a different symbol. Note, that means
966 * when a ret pops the stack, all jmps must be popped off first.
968 cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
969 to_al->sym, sample->addr,
971 err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
974 ts->stack[ts->cnt - 1].non_call = true;
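/* Depth (number of entries) of the thread's stack for @cpu */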
980 size_t thread_stack__depth(struct thread *thread, int cpu)
982 struct thread_stack *ts = thread__stack(thread, cpu);