1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_pt.c: Intel Processor Trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/zalloc.h>
27 #include "thread-stack.h"
29 #include "callchain.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "time-utils.h"
40 #include "../arch/x86/include/uapi/asm/perf_regs.h"
42 #include "intel-pt-decoder/intel-pt-log.h"
43 #include "intel-pt-decoder/intel-pt-decoder.h"
44 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
45 #include "intel-pt-decoder/intel-pt-pkt-decoder.h"
47 #define MAX_TIMESTAMP (~0ULL)
55 struct auxtrace auxtrace;
56 struct auxtrace_queues queues;
57 struct auxtrace_heap heap;
59 struct perf_session *session;
60 struct machine *machine;
61 struct evsel *switch_evsel;
62 struct thread *unknown_thread;
63 bool timeless_decoding;
72 bool use_thread_stack;
74 unsigned int br_stack_sz;
75 unsigned int br_stack_sz_plus;
76 int have_sched_switch;
82 struct perf_tsc_conversion tc;
83 bool cap_user_time_zero;
85 struct itrace_synth_opts synth_opts;
87 bool sample_instructions;
88 u64 instructions_sample_type;
93 u64 branches_sample_type;
96 bool sample_transactions;
97 u64 transactions_sample_type;
100 bool sample_ptwrites;
101 u64 ptwrites_sample_type;
104 bool sample_pwr_events;
105 u64 pwr_events_sample_type;
113 struct evsel *pebs_evsel;
122 unsigned max_non_turbo_ratio;
125 unsigned long num_events;
128 struct addr_filters filts;
130 struct range *time_ranges;
131 unsigned int range_cnt;
133 struct ip_callchain *chain;
134 struct branch_stack *br_stack;
138 INTEL_PT_SS_NOT_TRACING,
141 INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
142 INTEL_PT_SS_EXPECTING_SWITCH_IP,
145 struct intel_pt_queue {
147 unsigned int queue_nr;
148 struct auxtrace_buffer *buffer;
149 struct auxtrace_buffer *old_buffer;
151 const struct intel_pt_state *state;
152 struct ip_callchain *chain;
153 struct branch_stack *last_branch;
154 union perf_event *event_buf;
157 bool step_through_buffers;
158 bool use_buffer_pid_tid;
164 struct thread *thread;
171 unsigned int sel_idx;
177 u64 last_in_insn_cnt;
179 u64 last_br_insn_cnt;
181 unsigned int cbr_seen;
182 char insn[INTEL_PT_INSN_BUF_SZ];
185 static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
186 unsigned char *buf, size_t len)
188 struct intel_pt_pkt packet;
191 char desc[INTEL_PT_PKT_DESC_MAX];
192 const char *color = PERF_COLOR_BLUE;
193 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
195 color_fprintf(stdout, color,
196 ". ... Intel Processor Trace data: size %zu bytes\n",
200 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
206 color_fprintf(stdout, color, " %08x: ", pos);
207 for (i = 0; i < pkt_len; i++)
208 color_fprintf(stdout, color, " %02x", buf[i]);
210 color_fprintf(stdout, color, " ");
212 ret = intel_pt_pkt_desc(&packet, desc,
213 INTEL_PT_PKT_DESC_MAX);
215 color_fprintf(stdout, color, " %s\n", desc);
217 color_fprintf(stdout, color, " Bad packet!\n");
225 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
229 intel_pt_dump(pt, buf, len);
232 static void intel_pt_log_event(union perf_event *event)
234 FILE *f = intel_pt_log_fp();
236 if (!intel_pt_enable_logging || !f)
239 perf_event__fprintf(event, f);
242 static void intel_pt_dump_sample(struct perf_session *session,
243 struct perf_sample *sample)
245 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
249 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
252 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
253 struct auxtrace_buffer *b)
255 bool consecutive = false;
258 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
259 pt->have_tsc, &consecutive);
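/*
 * intel_pt_find_overlap() returns a pointer to the first byte of 'b' that is
 * not already contained in 'a', so 'b' is trimmed below to start from there.
 */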
262 b->use_size = b->data + b->size - start;
264 if (b->use_size && consecutive)
265 b->consecutive = true;
269 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
270 struct auxtrace_buffer *buffer,
271 struct auxtrace_buffer *old_buffer,
272 struct intel_pt_buffer *b)
277 int fd = perf_data__fd(ptq->pt->session->data);
279 buffer->data = auxtrace_buffer__get_data(buffer, fd);
284 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
285 if (might_overlap && !buffer->consecutive && old_buffer &&
286 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
289 if (buffer->use_data) {
290 b->len = buffer->use_size;
291 b->buf = buffer->use_data;
293 b->len = buffer->size;
294 b->buf = buffer->data;
296 b->ref_timestamp = buffer->reference;
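/*
 * A new trace_nr tells the decoder that this buffer is not continuous with
 * the previous one, so it must re-synchronize (e.g. to the next PSB).
 */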
298 if (!old_buffer || (might_overlap && !buffer->consecutive)) {
299 b->consecutive = false;
300 b->trace_nr = buffer->buffer_nr + 1;
302 b->consecutive = true;
308 /* Do not drop buffers with references - refer to intel_pt_get_trace() */
309 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
310 struct auxtrace_buffer *buffer)
312 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
315 auxtrace_buffer__drop_data(buffer);
318 /* Must be serialized with respect to intel_pt_get_trace() */
319 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
322 struct intel_pt_queue *ptq = data;
323 struct auxtrace_buffer *buffer = ptq->buffer;
324 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
325 struct auxtrace_queue *queue;
328 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
331 struct intel_pt_buffer b = { .len = 0 };
333 buffer = auxtrace_buffer__next(queue, buffer);
337 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
342 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
345 intel_pt_lookahead_drop_buffer(ptq, buffer);
349 err = cb(&b, cb_data);
354 if (buffer != old_buffer)
355 intel_pt_lookahead_drop_buffer(ptq, buffer);
356 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
362 * This function assumes data is processed sequentially only.
363 * Must be serialized with respect to intel_pt_lookahead()
365 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
367 struct intel_pt_queue *ptq = data;
368 struct auxtrace_buffer *buffer = ptq->buffer;
369 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
370 struct auxtrace_queue *queue;
378 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
380 buffer = auxtrace_buffer__next(queue, buffer);
383 auxtrace_buffer__drop_data(old_buffer);
388 ptq->buffer = buffer;
390 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
394 if (ptq->step_through_buffers)
399 auxtrace_buffer__drop_data(old_buffer);
400 ptq->old_buffer = buffer;
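/* Otherwise (no data in this buffer) drop it and recurse to fetch the next one */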
402 auxtrace_buffer__drop_data(buffer);
403 return intel_pt_get_trace(b, data);
409 struct intel_pt_cache_entry {
410 struct auxtrace_cache_entry entry;
413 enum intel_pt_insn_op op;
414 enum intel_pt_insn_branch branch;
417 char insn[INTEL_PT_INSN_BUF_SZ];
420 static int intel_pt_config_div(const char *var, const char *value, void *data)
425 if (!strcmp(var, "intel-pt.cache-divisor")) {
426 val = strtol(value, NULL, 0);
427 if (val > 0 && val <= INT_MAX)
434 static int intel_pt_cache_divisor(void)
441 perf_config(intel_pt_config_div, &d);
449 static unsigned int intel_pt_cache_size(struct dso *dso,
450 struct machine *machine)
454 size = dso__data_size(dso, machine);
455 size /= intel_pt_cache_divisor();
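/*
 * The result below is the number of hash bits for the auxtrace cache:
 * 32 - __builtin_clz(size) is 1 + floor(log2(size)), e.g. size = 128KB
 * gives 18 bits.
 */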
458 if (size > (1 << 21))
460 return 32 - __builtin_clz(size);
463 static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
464 struct machine *machine)
466 struct auxtrace_cache *c;
469 if (dso->auxtrace_cache)
470 return dso->auxtrace_cache;
472 bits = intel_pt_cache_size(dso, machine);
474 /* Ignoring cache creation failure */
475 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
477 dso->auxtrace_cache = c;
482 static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
483 u64 offset, u64 insn_cnt, u64 byte_cnt,
484 struct intel_pt_insn *intel_pt_insn)
486 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
487 struct intel_pt_cache_entry *e;
493 e = auxtrace_cache__alloc_entry(c);
497 e->insn_cnt = insn_cnt;
498 e->byte_cnt = byte_cnt;
499 e->op = intel_pt_insn->op;
500 e->branch = intel_pt_insn->branch;
501 e->length = intel_pt_insn->length;
502 e->rel = intel_pt_insn->rel;
503 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
505 err = auxtrace_cache__add(c, offset, &e->entry);
507 auxtrace_cache__free_entry(c, e);
512 static struct intel_pt_cache_entry *
513 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
515 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
520 return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
523 static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
525 return ip >= pt->kernel_start ?
526 PERF_RECORD_MISC_KERNEL :
527 PERF_RECORD_MISC_USER;
530 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
531 uint64_t *insn_cnt_ptr, uint64_t *ip,
532 uint64_t to_ip, uint64_t max_insn_cnt,
535 struct intel_pt_queue *ptq = data;
536 struct machine *machine = ptq->pt->machine;
537 struct thread *thread;
538 struct addr_location al;
539 unsigned char buf[INTEL_PT_INSN_BUF_SZ];
543 u64 offset, start_offset, start_ip;
547 intel_pt_insn->length = 0;
549 if (to_ip && *ip == to_ip)
552 cpumode = intel_pt_cpumode(ptq->pt, *ip);
554 thread = ptq->thread;
556 if (cpumode != PERF_RECORD_MISC_KERNEL)
558 thread = ptq->pt->unknown_thread;
562 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
565 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
566 dso__data_status_seen(al.map->dso,
567 DSO_DATA_STATUS_SEEN_ITRACE))
570 offset = al.map->map_ip(al.map, *ip);
572 if (!to_ip && one_map) {
573 struct intel_pt_cache_entry *e;
575 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
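/*
 * A cache hit replays a previously decoded run of instructions (up to the
 * next branch) without reading the DSO data again.
 */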
577 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
578 *insn_cnt_ptr = e->insn_cnt;
580 intel_pt_insn->op = e->op;
581 intel_pt_insn->branch = e->branch;
582 intel_pt_insn->length = e->length;
583 intel_pt_insn->rel = e->rel;
584 memcpy(intel_pt_insn->buf, e->insn,
585 INTEL_PT_INSN_BUF_SZ);
586 intel_pt_log_insn_no_data(intel_pt_insn, *ip);
591 start_offset = offset;
594 /* Load maps to ensure dso->is_64_bit has been updated */
597 x86_64 = al.map->dso->is_64_bit;
600 len = dso__data_read_offset(al.map->dso, machine,
602 INTEL_PT_INSN_BUF_SZ);
606 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
609 intel_pt_log_insn(intel_pt_insn, *ip);
613 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
616 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
619 *ip += intel_pt_insn->length;
621 if (to_ip && *ip == to_ip)
624 if (*ip >= al.map->end)
627 offset += intel_pt_insn->length;
632 *insn_cnt_ptr = insn_cnt;
638 * Didn't look up the cache in the 'to_ip' case, so do it now to prevent duplicate cache entries.
642 struct intel_pt_cache_entry *e;
644 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
649 /* Ignore cache errors */
650 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
651 *ip - start_ip, intel_pt_insn);
656 *insn_cnt_ptr = insn_cnt;
660 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
661 uint64_t offset, const char *filename)
663 struct addr_filter *filt;
664 bool have_filter = false;
665 bool hit_tracestop = false;
666 bool hit_filter = false;
668 list_for_each_entry(filt, &pt->filts.head, list) {
672 if ((filename && !filt->filename) ||
673 (!filename && filt->filename) ||
674 (filename && strcmp(filename, filt->filename)))
677 if (!(offset >= filt->addr && offset < filt->addr + filt->size))
680 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
681 ip, offset, filename ? filename : "[kernel]",
682 filt->start ? "filter" : "stop",
683 filt->addr, filt->size);
688 hit_tracestop = true;
691 if (!hit_tracestop && !hit_filter)
692 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
693 ip, offset, filename ? filename : "[kernel]");
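/*
 * True means the ip is not being traced: either a trace-stop region was hit,
 * or address filters exist but none of them matched.
 */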
695 return hit_tracestop || (have_filter && !hit_filter);
698 static int __intel_pt_pgd_ip(uint64_t ip, void *data)
700 struct intel_pt_queue *ptq = data;
701 struct thread *thread;
702 struct addr_location al;
706 if (ip >= ptq->pt->kernel_start)
707 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
709 cpumode = PERF_RECORD_MISC_USER;
711 thread = ptq->thread;
715 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
718 offset = al.map->map_ip(al.map, ip);
720 return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
721 al.map->dso->long_name);
724 static bool intel_pt_pgd_ip(uint64_t ip, void *data)
726 return __intel_pt_pgd_ip(ip, data) > 0;
729 static bool intel_pt_get_config(struct intel_pt *pt,
730 struct perf_event_attr *attr, u64 *config)
732 if (attr->type == pt->pmu_type) {
734 *config = attr->config;
741 static bool intel_pt_exclude_kernel(struct intel_pt *pt)
745 evlist__for_each_entry(pt->session->evlist, evsel) {
746 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
747 !evsel->core.attr.exclude_kernel)
753 static bool intel_pt_return_compression(struct intel_pt *pt)
758 if (!pt->noretcomp_bit)
761 evlist__for_each_entry(pt->session->evlist, evsel) {
762 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
763 (config & pt->noretcomp_bit))
769 static bool intel_pt_branch_enable(struct intel_pt *pt)
774 evlist__for_each_entry(pt->session->evlist, evsel) {
775 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
776 (config & 1) && !(config & 0x2000))
782 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
788 if (!pt->mtc_freq_bits)
791 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
794 evlist__for_each_entry(pt->session->evlist, evsel) {
795 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
796 return (config & pt->mtc_freq_bits) >> shift;
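/*
 * The loop above finds the bit position ('shift') of the MTC frequency field
 * from its mask, so the field value can be extracted from each event's config.
 */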
801 static bool intel_pt_timeless_decoding(struct intel_pt *pt)
804 bool timeless_decoding = true;
807 if (!pt->tsc_bit || !pt->cap_user_time_zero)
810 evlist__for_each_entry(pt->session->evlist, evsel) {
811 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
813 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
814 if (config & pt->tsc_bit)
815 timeless_decoding = false;
820 return timeless_decoding;
823 static bool intel_pt_tracing_kernel(struct intel_pt *pt)
827 evlist__for_each_entry(pt->session->evlist, evsel) {
828 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
829 !evsel->core.attr.exclude_kernel)
835 static bool intel_pt_have_tsc(struct intel_pt *pt)
838 bool have_tsc = false;
844 evlist__for_each_entry(pt->session->evlist, evsel) {
845 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
846 if (config & pt->tsc_bit)
855 static bool intel_pt_sampling_mode(struct intel_pt *pt)
859 evlist__for_each_entry(pt->session->evlist, evsel) {
860 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
861 evsel->core.attr.aux_sample_size)
867 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
871 quot = ns / pt->tc.time_mult;
872 rem = ns % pt->tc.time_mult;
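/*
 * Inverse of the perf time conversion: ticks = (ns << time_shift) / time_mult,
 * split into quotient and remainder so the shift cannot overflow 64 bits.
 */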
873 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
877 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
879 size_t sz = sizeof(struct ip_callchain);
881 /* Add 1 to callchain_sz for callchain context */
882 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
886 static int intel_pt_callchain_init(struct intel_pt *pt)
890 evlist__for_each_entry(pt->session->evlist, evsel) {
891 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
892 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
895 pt->chain = intel_pt_alloc_chain(pt);
902 static void intel_pt_add_callchain(struct intel_pt *pt,
903 struct perf_sample *sample)
905 struct thread *thread = machine__findnew_thread(pt->machine,
909 thread_stack__sample_late(thread, sample->cpu, pt->chain,
910 pt->synth_opts.callchain_sz + 1, sample->ip,
913 sample->callchain = pt->chain;
916 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
918 size_t sz = sizeof(struct branch_stack);
920 sz += entry_cnt * sizeof(struct branch_entry);
924 static int intel_pt_br_stack_init(struct intel_pt *pt)
928 evlist__for_each_entry(pt->session->evlist, evsel) {
929 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
930 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
933 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
940 static void intel_pt_add_br_stack(struct intel_pt *pt,
941 struct perf_sample *sample)
943 struct thread *thread = machine__findnew_thread(pt->machine,
947 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
948 pt->br_stack_sz, sample->ip,
951 sample->branch_stack = pt->br_stack;
954 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
955 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
957 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
958 unsigned int queue_nr)
960 struct intel_pt_params params = { .get_trace = 0, };
961 struct perf_env *env = pt->machine->env;
962 struct intel_pt_queue *ptq;
964 ptq = zalloc(sizeof(struct intel_pt_queue));
968 if (pt->synth_opts.callchain) {
969 ptq->chain = intel_pt_alloc_chain(pt);
974 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
975 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
977 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
978 if (!ptq->last_branch)
982 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
987 ptq->queue_nr = queue_nr;
988 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
994 params.get_trace = intel_pt_get_trace;
995 params.walk_insn = intel_pt_walk_next_insn;
996 params.lookahead = intel_pt_lookahead;
998 params.return_compression = intel_pt_return_compression(pt);
999 params.branch_enable = intel_pt_branch_enable(pt);
1000 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
1001 params.mtc_period = intel_pt_mtc_period(pt);
1002 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1003 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
1005 if (pt->filts.cnt > 0)
1006 params.pgd_ip = intel_pt_pgd_ip;
1008 if (pt->synth_opts.instructions) {
1009 if (pt->synth_opts.period) {
1010 switch (pt->synth_opts.period_type) {
1011 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1012 params.period_type =
1013 INTEL_PT_PERIOD_INSTRUCTIONS;
1014 params.period = pt->synth_opts.period;
1016 case PERF_ITRACE_PERIOD_TICKS:
1017 params.period_type = INTEL_PT_PERIOD_TICKS;
1018 params.period = pt->synth_opts.period;
1020 case PERF_ITRACE_PERIOD_NANOSECS:
1021 params.period_type = INTEL_PT_PERIOD_TICKS;
1022 params.period = intel_pt_ns_to_ticks(pt,
1023 pt->synth_opts.period);
1030 if (!params.period) {
1031 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
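/*
 * Family 6 model 92 (0x5c) is Goldmont-based Atom, where an erratum means FUP
 * packet payloads carry the next linear IP (NLIP) rather than the current IP.
 */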
1036 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1037 params.flags |= INTEL_PT_FUP_WITH_NLIP;
1039 ptq->decoder = intel_pt_decoder_new(&params);
1046 zfree(&ptq->event_buf);
1047 zfree(&ptq->last_branch);
1053 static void intel_pt_free_queue(void *priv)
1055 struct intel_pt_queue *ptq = priv;
1059 thread__zput(ptq->thread);
1060 intel_pt_decoder_free(ptq->decoder);
1061 zfree(&ptq->event_buf);
1062 zfree(&ptq->last_branch);
1067 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1068 struct auxtrace_queue *queue)
1070 struct intel_pt_queue *ptq = queue->priv;
1072 if (queue->tid == -1 || pt->have_sched_switch) {
1073 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1074 thread__zput(ptq->thread);
1077 if (!ptq->thread && ptq->tid != -1)
1078 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1081 ptq->pid = ptq->thread->pid_;
1082 if (queue->cpu == -1)
1083 ptq->cpu = ptq->thread->cpu;
1087 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1089 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1090 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1091 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1092 if (ptq->state->to_ip)
1093 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1094 PERF_IP_FLAG_ASYNC |
1095 PERF_IP_FLAG_INTERRUPT;
1097 ptq->flags = PERF_IP_FLAG_BRANCH |
1098 PERF_IP_FLAG_TRACE_END;
1101 if (ptq->state->from_ip)
1102 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1104 ptq->flags = PERF_IP_FLAG_BRANCH |
1105 PERF_IP_FLAG_TRACE_BEGIN;
1106 if (ptq->state->flags & INTEL_PT_IN_TX)
1107 ptq->flags |= PERF_IP_FLAG_IN_TX;
1108 ptq->insn_len = ptq->state->insn_len;
1109 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1112 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1113 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1114 if (ptq->state->type & INTEL_PT_TRACE_END)
1115 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1118 static void intel_pt_setup_time_range(struct intel_pt *pt,
1119 struct intel_pt_queue *ptq)
1124 ptq->sel_timestamp = pt->time_ranges[0].start;
1127 if (ptq->sel_timestamp) {
1128 ptq->sel_start = true;
1130 ptq->sel_timestamp = pt->time_ranges[0].end;
1131 ptq->sel_start = false;
1135 static int intel_pt_setup_queue(struct intel_pt *pt,
1136 struct auxtrace_queue *queue,
1137 unsigned int queue_nr)
1139 struct intel_pt_queue *ptq = queue->priv;
1141 if (list_empty(&queue->head))
1145 ptq = intel_pt_alloc_queue(pt, queue_nr);
1150 if (queue->cpu != -1)
1151 ptq->cpu = queue->cpu;
1152 ptq->tid = queue->tid;
1154 ptq->cbr_seen = UINT_MAX;
1156 if (pt->sampling_mode && !pt->snapshot_mode &&
1157 pt->timeless_decoding)
1158 ptq->step_through_buffers = true;
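/* See intel_pt_get_trace(): stepping makes the decoder stop after each buffer */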
1160 ptq->sync_switch = pt->sync_switch;
1162 intel_pt_setup_time_range(pt, ptq);
1165 if (!ptq->on_heap &&
1166 (!ptq->sync_switch ||
1167 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1168 const struct intel_pt_state *state;
1171 if (pt->timeless_decoding)
1174 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1175 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1176 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1178 if (ptq->sel_start && ptq->sel_timestamp) {
1179 ret = intel_pt_fast_forward(ptq->decoder,
1180 ptq->sel_timestamp);
1186 state = intel_pt_decode(ptq->decoder);
1188 if (state->err == INTEL_PT_ERR_NODATA) {
1189 intel_pt_log("queue %u has no timestamp\n",
1195 if (state->timestamp)
1199 ptq->timestamp = state->timestamp;
1200 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1201 queue_nr, ptq->timestamp);
1203 ptq->have_sample = true;
1204 if (ptq->sel_start && ptq->sel_timestamp &&
1205 ptq->timestamp < ptq->sel_timestamp)
1206 ptq->have_sample = false;
1207 intel_pt_sample_flags(ptq);
1208 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1211 ptq->on_heap = true;
1217 static int intel_pt_setup_queues(struct intel_pt *pt)
1222 for (i = 0; i < pt->queues.nr_queues; i++) {
1223 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1230 static inline bool intel_pt_skip_event(struct intel_pt *pt)
1232 return pt->synth_opts.initial_skip &&
1233 pt->num_events++ < pt->synth_opts.initial_skip;
1237 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1238 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
1239 * from this decoder state.
1241 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1243 return pt->synth_opts.initial_skip &&
1244 pt->num_events + 4 < pt->synth_opts.initial_skip;
1247 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1248 union perf_event *event,
1249 struct perf_sample *sample)
1251 event->sample.header.type = PERF_RECORD_SAMPLE;
1252 event->sample.header.size = sizeof(struct perf_event_header);
1254 sample->pid = ptq->pid;
1255 sample->tid = ptq->tid;
1256 sample->cpu = ptq->cpu;
1257 sample->insn_len = ptq->insn_len;
1258 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1261 static void intel_pt_prep_b_sample(struct intel_pt *pt,
1262 struct intel_pt_queue *ptq,
1263 union perf_event *event,
1264 struct perf_sample *sample)
1266 intel_pt_prep_a_sample(ptq, event, sample);
1268 if (!pt->timeless_decoding)
1269 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1271 sample->ip = ptq->state->from_ip;
1272 sample->cpumode = intel_pt_cpumode(pt, sample->ip);
1273 sample->addr = ptq->state->to_ip;
1275 sample->flags = ptq->flags;
1277 event->sample.header.misc = sample->cpumode;
1280 static int intel_pt_inject_event(union perf_event *event,
1281 struct perf_sample *sample, u64 type)
1283 event->header.size = perf_event__sample_event_size(sample, type, 0);
1284 return perf_event__synthesize_sample(event, type, 0, sample);
1287 static inline int intel_pt_opt_inject(struct intel_pt *pt,
1288 union perf_event *event,
1289 struct perf_sample *sample, u64 type)
1291 if (!pt->synth_opts.inject)
1294 return intel_pt_inject_event(event, sample, type);
1297 static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1298 union perf_event *event,
1299 struct perf_sample *sample, u64 type)
1303 ret = intel_pt_opt_inject(pt, event, sample, type);
1307 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1309 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1314 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1316 struct intel_pt *pt = ptq->pt;
1317 union perf_event *event = ptq->event_buf;
1318 struct perf_sample sample = { .ip = 0, };
1319 struct dummy_branch_stack {
1322 struct branch_entry entries;
1325 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1328 if (intel_pt_skip_event(pt))
1331 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1333 sample.id = ptq->pt->branches_id;
1334 sample.stream_id = ptq->pt->branches_id;
1337 * perf report cannot handle events without a branch stack when using
1338 * SORT_MODE__BRANCH, so make a dummy one.
1340 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1341 dummy_bs = (struct dummy_branch_stack){
1349 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1352 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
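/*
 * The cycle and instruction deltas since the last branch sample let tools
 * such as perf script report IPC per sample.
 */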
1353 if (sample.cyc_cnt) {
1354 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1355 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1356 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1359 return intel_pt_deliver_synth_event(pt, event, &sample,
1360 pt->branches_sample_type);
1363 static void intel_pt_prep_sample(struct intel_pt *pt,
1364 struct intel_pt_queue *ptq,
1365 union perf_event *event,
1366 struct perf_sample *sample)
1368 intel_pt_prep_b_sample(pt, ptq, event, sample);
1370 if (pt->synth_opts.callchain) {
1371 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1372 pt->synth_opts.callchain_sz + 1,
1373 sample->ip, pt->kernel_start);
1374 sample->callchain = ptq->chain;
1377 if (pt->synth_opts.last_branch) {
1378 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1380 sample->branch_stack = ptq->last_branch;
1384 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1386 struct intel_pt *pt = ptq->pt;
1387 union perf_event *event = ptq->event_buf;
1388 struct perf_sample sample = { .ip = 0, };
1390 if (intel_pt_skip_event(pt))
1393 intel_pt_prep_sample(pt, ptq, event, &sample);
1395 sample.id = ptq->pt->instructions_id;
1396 sample.stream_id = ptq->pt->instructions_id;
1397 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1399 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1400 if (sample.cyc_cnt) {
1401 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1402 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1403 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1406 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1408 return intel_pt_deliver_synth_event(pt, event, &sample,
1409 pt->instructions_sample_type);
1412 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1414 struct intel_pt *pt = ptq->pt;
1415 union perf_event *event = ptq->event_buf;
1416 struct perf_sample sample = { .ip = 0, };
1418 if (intel_pt_skip_event(pt))
1421 intel_pt_prep_sample(pt, ptq, event, &sample);
1423 sample.id = ptq->pt->transactions_id;
1424 sample.stream_id = ptq->pt->transactions_id;
1426 return intel_pt_deliver_synth_event(pt, event, &sample,
1427 pt->transactions_sample_type);
1430 static void intel_pt_prep_p_sample(struct intel_pt *pt,
1431 struct intel_pt_queue *ptq,
1432 union perf_event *event,
1433 struct perf_sample *sample)
1435 intel_pt_prep_sample(pt, ptq, event, sample);
1438 * Zero IP is used to mean "trace start" but that is not the case for
1439 * power or PTWRITE events with no IP, so clear the flags.
1445 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1447 struct intel_pt *pt = ptq->pt;
1448 union perf_event *event = ptq->event_buf;
1449 struct perf_sample sample = { .ip = 0, };
1450 struct perf_synth_intel_ptwrite raw;
1452 if (intel_pt_skip_event(pt))
1455 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1457 sample.id = ptq->pt->ptwrites_id;
1458 sample.stream_id = ptq->pt->ptwrites_id;
1461 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1462 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1464 sample.raw_size = perf_synth__raw_size(raw);
1465 sample.raw_data = perf_synth__raw_data(&raw);
1467 return intel_pt_deliver_synth_event(pt, event, &sample,
1468 pt->ptwrites_sample_type);
1471 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1473 struct intel_pt *pt = ptq->pt;
1474 union perf_event *event = ptq->event_buf;
1475 struct perf_sample sample = { .ip = 0, };
1476 struct perf_synth_intel_cbr raw;
1479 if (intel_pt_skip_cbr_event(pt))
1482 ptq->cbr_seen = ptq->state->cbr;
1484 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1486 sample.id = ptq->pt->cbr_id;
1487 sample.stream_id = ptq->pt->cbr_id;
1489 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1490 raw.flags = cpu_to_le32(flags);
1491 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1494 sample.raw_size = perf_synth__raw_size(raw);
1495 sample.raw_data = perf_synth__raw_data(&raw);
1497 return intel_pt_deliver_synth_event(pt, event, &sample,
1498 pt->pwr_events_sample_type);
1501 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1503 struct intel_pt *pt = ptq->pt;
1504 union perf_event *event = ptq->event_buf;
1505 struct perf_sample sample = { .ip = 0, };
1506 struct perf_synth_intel_mwait raw;
1508 if (intel_pt_skip_event(pt))
1511 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1513 sample.id = ptq->pt->mwait_id;
1514 sample.stream_id = ptq->pt->mwait_id;
1517 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1519 sample.raw_size = perf_synth__raw_size(raw);
1520 sample.raw_data = perf_synth__raw_data(&raw);
1522 return intel_pt_deliver_synth_event(pt, event, &sample,
1523 pt->pwr_events_sample_type);
1526 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1528 struct intel_pt *pt = ptq->pt;
1529 union perf_event *event = ptq->event_buf;
1530 struct perf_sample sample = { .ip = 0, };
1531 struct perf_synth_intel_pwre raw;
1533 if (intel_pt_skip_event(pt))
1536 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1538 sample.id = ptq->pt->pwre_id;
1539 sample.stream_id = ptq->pt->pwre_id;
1542 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1544 sample.raw_size = perf_synth__raw_size(raw);
1545 sample.raw_data = perf_synth__raw_data(&raw);
1547 return intel_pt_deliver_synth_event(pt, event, &sample,
1548 pt->pwr_events_sample_type);
1551 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1553 struct intel_pt *pt = ptq->pt;
1554 union perf_event *event = ptq->event_buf;
1555 struct perf_sample sample = { .ip = 0, };
1556 struct perf_synth_intel_exstop raw;
1558 if (intel_pt_skip_event(pt))
1561 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1563 sample.id = ptq->pt->exstop_id;
1564 sample.stream_id = ptq->pt->exstop_id;
1567 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1569 sample.raw_size = perf_synth__raw_size(raw);
1570 sample.raw_data = perf_synth__raw_data(&raw);
1572 return intel_pt_deliver_synth_event(pt, event, &sample,
1573 pt->pwr_events_sample_type);
1576 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1578 struct intel_pt *pt = ptq->pt;
1579 union perf_event *event = ptq->event_buf;
1580 struct perf_sample sample = { .ip = 0, };
1581 struct perf_synth_intel_pwrx raw;
1583 if (intel_pt_skip_event(pt))
1586 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1588 sample.id = ptq->pt->pwrx_id;
1589 sample.stream_id = ptq->pt->pwrx_id;
1592 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1594 sample.raw_size = perf_synth__raw_size(raw);
1595 sample.raw_data = perf_synth__raw_data(&raw);
1597 return intel_pt_deliver_synth_event(pt, event, &sample,
1598 pt->pwr_events_sample_type);
1602 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer to
1603 * intel_pt_add_gp_regs().
1605 static const int pebs_gp_regs[] = {
1606 [PERF_REG_X86_FLAGS] = 1,
1607 [PERF_REG_X86_IP] = 2,
1608 [PERF_REG_X86_AX] = 3,
1609 [PERF_REG_X86_CX] = 4,
1610 [PERF_REG_X86_DX] = 5,
1611 [PERF_REG_X86_BX] = 6,
1612 [PERF_REG_X86_SP] = 7,
1613 [PERF_REG_X86_BP] = 8,
1614 [PERF_REG_X86_SI] = 9,
1615 [PERF_REG_X86_DI] = 10,
1616 [PERF_REG_X86_R8] = 11,
1617 [PERF_REG_X86_R9] = 12,
1618 [PERF_REG_X86_R10] = 13,
1619 [PERF_REG_X86_R11] = 14,
1620 [PERF_REG_X86_R12] = 15,
1621 [PERF_REG_X86_R13] = 16,
1622 [PERF_REG_X86_R14] = 17,
1623 [PERF_REG_X86_R15] = 18,
1626 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1627 const struct intel_pt_blk_items *items,
1630 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1631 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1635 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1636 /* Get the PEBS gp_regs array index */
1637 int n = pebs_gp_regs[i] - 1;
1642 * Add only registers that were requested (i.e. 'regs_mask') and
1643 * that were provided (i.e. 'mask'), and update the resulting
1644 * mask (i.e. 'intr_regs->mask') accordingly.
1646 if (mask & 1 << n && regs_mask & bit) {
1647 intr_regs->mask |= bit;
1648 *pos++ = gp_regs[n];
1655 #ifndef PERF_REG_X86_XMM0
1656 #define PERF_REG_X86_XMM0 32
1659 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1660 const struct intel_pt_blk_items *items,
1663 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1664 const u64 *xmm = items->xmm;
1667 * If there are any XMM registers, then there should be all of them.
1668 * Nevertheless, follow the logic to add only registers that were
1669 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1670 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1672 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1674 for (; mask; mask >>= 1, xmm++) {
1680 #define LBR_INFO_MISPRED (1ULL << 63)
1681 #define LBR_INFO_IN_TX (1ULL << 62)
1682 #define LBR_INFO_ABORT (1ULL << 61)
1683 #define LBR_INFO_CYCLES 0xffff
1685 /* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
1686 static u64 intel_pt_lbr_flags(u64 info)
1689 struct branch_flags flags;
1694 u.flags.mispred = !!(info & LBR_INFO_MISPRED);
1695 u.flags.predicted = !(info & LBR_INFO_MISPRED);
1696 u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
1697 u.flags.abort = !!(info & LBR_INFO_ABORT);
1698 u.flags.cycles = info & LBR_INFO_CYCLES;
1703 static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1704 const struct intel_pt_blk_items *items)
1711 to = &br_stack->entries[0].from;
1713 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1714 u32 mask = items->mask[i];
1715 const u64 *from = items->val[i];
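/*
 * Each LBR entry is 3 consecutive block items (from, to, info), so copy an
 * entry only when all 3 mask bits are present.
 */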
1717 for (; mask; mask >>= 3, from += 3) {
1718 if ((mask & 7) == 7) {
1721 *to++ = intel_pt_lbr_flags(from[2]);
1728 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
1730 const struct intel_pt_blk_items *items = &ptq->state->items;
1731 struct perf_sample sample = { .ip = 0, };
1732 union perf_event *event = ptq->event_buf;
1733 struct intel_pt *pt = ptq->pt;
1734 struct evsel *evsel = pt->pebs_evsel;
1735 u64 sample_type = evsel->core.attr.sample_type;
1736 u64 id = evsel->core.id[0];
1738 u64 regs[8 * sizeof(sample.intr_regs.mask)];
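/* One slot per possible mask bit, i.e. 64 entries, enough for GP + XMM regs */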
1740 if (intel_pt_skip_event(pt))
1743 intel_pt_prep_a_sample(ptq, event, &sample);
1746 sample.stream_id = id;
1748 if (!evsel->core.attr.freq)
1749 sample.period = evsel->core.attr.sample_period;
1751 /* No support for non-zero CS base */
1753 sample.ip = items->ip;
1754 else if (items->has_rip)
1755 sample.ip = items->rip;
1757 sample.ip = ptq->state->from_ip;
1759 /* No support for guest mode at this time */
1760 cpumode = sample.ip < ptq->pt->kernel_start ?
1761 PERF_RECORD_MISC_USER :
1762 PERF_RECORD_MISC_KERNEL;
1764 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
1766 sample.cpumode = cpumode;
1768 if (sample_type & PERF_SAMPLE_TIME) {
1771 if (items->has_timestamp)
1772 timestamp = items->timestamp;
1773 else if (!pt->timeless_decoding)
1774 timestamp = ptq->timestamp;
1776 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
1779 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
1780 pt->synth_opts.callchain) {
1781 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1782 pt->synth_opts.callchain_sz, sample.ip,
1784 sample.callchain = ptq->chain;
1787 if (sample_type & PERF_SAMPLE_REGS_INTR &&
1788 (items->mask[INTEL_PT_GP_REGS_POS] ||
1789 items->mask[INTEL_PT_XMM_POS])) {
1790 u64 regs_mask = evsel->core.attr.sample_regs_intr;
1793 sample.intr_regs.abi = items->is_32_bit ?
1794 PERF_SAMPLE_REGS_ABI_32 :
1795 PERF_SAMPLE_REGS_ABI_64;
1796 sample.intr_regs.regs = regs;
1798 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
1800 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
1803 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1804 if (items->mask[INTEL_PT_LBR_0_POS] ||
1805 items->mask[INTEL_PT_LBR_1_POS] ||
1806 items->mask[INTEL_PT_LBR_2_POS]) {
1807 intel_pt_add_lbrs(ptq->last_branch, items);
1808 } else if (pt->synth_opts.last_branch) {
1809 thread_stack__br_sample(ptq->thread, ptq->cpu,
1813 ptq->last_branch->nr = 0;
1815 sample.branch_stack = ptq->last_branch;
1818 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
1819 sample.addr = items->mem_access_address;
1821 if (sample_type & PERF_SAMPLE_WEIGHT) {
1823 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
1824 * intel_hsw_weight().
1826 if (items->has_mem_access_latency)
1827 sample.weight = items->mem_access_latency;
1828 if (!sample.weight && items->has_tsx_aux_info) {
1829 /* Cycles last block */
1830 sample.weight = (u32)items->tsx_aux_info;
1834 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
1835 u64 ax = items->has_rax ? items->rax : 0;
1836 /* Refer to the kernel's intel_hsw_transaction() */
1837 u64 txn = (u8)(items->tsx_aux_info >> 32);
1839 /* For RTM XABORTs also log the abort code from AX */
1840 if (txn & PERF_TXN_TRANSACTION && ax & 1)
1841 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1842 sample.transaction = txn;
1845 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
1848 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
1849 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
1851 union perf_event event;
1852 char msg[MAX_AUXTRACE_ERROR_MSG];
1855 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
1857 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
1858 code, cpu, pid, tid, ip, msg, timestamp);
1860 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
1862 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
1868 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
1869 const struct intel_pt_state *state)
1871 struct intel_pt *pt = ptq->pt;
1872 u64 tm = ptq->timestamp;
1874 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
1876 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
1877 ptq->tid, state->from_ip, tm);
1880 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
1882 struct auxtrace_queue *queue;
1883 pid_t tid = ptq->next_tid;
1889 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
1891 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
1893 queue = &pt->queues.queue_array[ptq->queue_nr];
1894 intel_pt_set_pid_tid_cpu(pt, queue);
1901 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
1903 struct intel_pt *pt = ptq->pt;
1905 return ip == pt->switch_ip &&
1906 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
1907 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
1908 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
1911 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
1912 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
1914 static int intel_pt_sample(struct intel_pt_queue *ptq)
1916 const struct intel_pt_state *state = ptq->state;
1917 struct intel_pt *pt = ptq->pt;
1920 if (!ptq->have_sample)
1923 ptq->have_sample = false;
1925 if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
1927 * Cycle count and instruction count only go together to create
1928 * a valid IPC ratio when the cycle count changes.
1930 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
1931 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
1935 * Do PEBS first to allow for the possibility that the PEBS timestamp
1936 * precedes the current timestamp.
1938 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
1939 err = intel_pt_synth_pebs_sample(ptq);
1944 if (pt->sample_pwr_events) {
1945 if (ptq->state->cbr != ptq->cbr_seen) {
1946 err = intel_pt_synth_cbr_sample(ptq);
1950 if (state->type & INTEL_PT_PWR_EVT) {
1951 if (state->type & INTEL_PT_MWAIT_OP) {
1952 err = intel_pt_synth_mwait_sample(ptq);
1956 if (state->type & INTEL_PT_PWR_ENTRY) {
1957 err = intel_pt_synth_pwre_sample(ptq);
1961 if (state->type & INTEL_PT_EX_STOP) {
1962 err = intel_pt_synth_exstop_sample(ptq);
1966 if (state->type & INTEL_PT_PWR_EXIT) {
1967 err = intel_pt_synth_pwrx_sample(ptq);
1974 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
1975 err = intel_pt_synth_instruction_sample(ptq);
1980 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
1981 err = intel_pt_synth_transaction_sample(ptq);
1986 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
1987 err = intel_pt_synth_ptwrite_sample(ptq);
1992 if (!(state->type & INTEL_PT_BRANCH))
1995 if (pt->use_thread_stack) {
1996 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
1997 state->from_ip, state->to_ip, ptq->insn_len,
1998 state->trace_nr, pt->callstack,
1999 pt->br_stack_sz_plus,
2002 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2005 if (pt->sample_branches) {
2006 err = intel_pt_synth_branch_sample(ptq);
2011 if (!ptq->sync_switch)
2014 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2015 switch (ptq->switch_state) {
2016 case INTEL_PT_SS_NOT_TRACING:
2017 case INTEL_PT_SS_UNKNOWN:
2018 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2019 err = intel_pt_next_tid(pt, ptq);
2022 ptq->switch_state = INTEL_PT_SS_TRACING;
2025 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2028 } else if (!state->to_ip) {
2029 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2030 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2031 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2032 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2033 state->to_ip == pt->ptss_ip &&
2034 (ptq->flags & PERF_IP_FLAG_CALL)) {
2035 ptq->switch_state = INTEL_PT_SS_TRACING;
2041 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2043 struct machine *machine = pt->machine;
2045 struct symbol *sym, *start;
2046 u64 ip, switch_ip = 0;
2052 map = machine__kernel_map(machine);
2059 start = dso__first_symbol(map->dso);
2061 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2062 if (sym->binding == STB_GLOBAL &&
2063 !strcmp(sym->name, "__switch_to")) {
2064 ip = map->unmap_ip(map, sym->start);
2065 if (ip >= map->start && ip < map->end) {
2072 if (!switch_ip || !ptss_ip)
2075 if (pt->have_sched_switch == 1)
2076 ptss = "perf_trace_sched_switch";
2078 ptss = "__perf_event_task_sched_out";
2080 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2081 if (!strcmp(sym->name, ptss)) {
2082 ip = map->unmap_ip(map, sym->start);
2083 if (ip >= map->start && ip < map->end) {
2093 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2097 pt->sync_switch = true;
2099 for (i = 0; i < pt->queues.nr_queues; i++) {
2100 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2101 struct intel_pt_queue *ptq = queue->priv;
2104 ptq->sync_switch = true;
2109 * To filter against time ranges, it is only necessary to look at the next start or end time.
2112 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2114 struct intel_pt *pt = ptq->pt;
2116 if (ptq->sel_start) {
2117 /* Next time is an end time */
2118 ptq->sel_start = false;
2119 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2121 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2122 /* Next time is a start time */
2123 ptq->sel_start = true;
2125 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2133 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2138 if (ptq->sel_start) {
2139 if (ptq->timestamp >= ptq->sel_timestamp) {
2140 /* After start time, so consider next time */
2141 intel_pt_next_time(ptq);
2142 if (!ptq->sel_timestamp) {
2146 /* Check against end time */
2149 /* Before start time, so fast forward */
2150 ptq->have_sample = false;
2151 if (ptq->sel_timestamp > *ff_timestamp) {
2152 if (ptq->sync_switch) {
2153 intel_pt_next_tid(ptq->pt, ptq);
2154 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2156 *ff_timestamp = ptq->sel_timestamp;
2157 err = intel_pt_fast_forward(ptq->decoder,
2158 ptq->sel_timestamp);
2163 } else if (ptq->timestamp > ptq->sel_timestamp) {
2164 /* After end time, so consider next time */
2165 if (!intel_pt_next_time(ptq)) {
2166 /* No next time range, so stop decoding */
2167 ptq->have_sample = false;
2168 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2171 /* Check against next start time */
2174 /* Before end time */
2180 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2182 const struct intel_pt_state *state = ptq->state;
2183 struct intel_pt *pt = ptq->pt;
2184 u64 ff_timestamp = 0;
2187 if (!pt->kernel_start) {
2188 pt->kernel_start = machine__kernel_start(pt->machine);
2189 if (pt->per_cpu_mmaps &&
2190 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2191 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2192 !pt->sampling_mode) {
2193 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2194 if (pt->switch_ip) {
2195 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2196 pt->switch_ip, pt->ptss_ip);
2197 intel_pt_enable_sync_switch(pt);
2202 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2203 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2205 err = intel_pt_sample(ptq);
2209 state = intel_pt_decode(ptq->decoder);
2211 if (state->err == INTEL_PT_ERR_NODATA)
2213 if (ptq->sync_switch &&
2214 state->from_ip >= pt->kernel_start) {
2215 ptq->sync_switch = false;
2216 intel_pt_next_tid(pt, ptq);
2218 if (pt->synth_opts.errors) {
2219 err = intel_ptq_synth_error(ptq, state);
2227 ptq->have_sample = true;
2228 intel_pt_sample_flags(ptq);
2230 /* Use estimated TSC upon return to user space */
2232 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2233 state->to_ip && state->to_ip < pt->kernel_start) {
2234 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2235 state->timestamp, state->est_timestamp);
2236 ptq->timestamp = state->est_timestamp;
2237 /* Use estimated TSC in unknown switch state */
2238 } else if (ptq->sync_switch &&
2239 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2240 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2241 ptq->next_tid == -1) {
2242 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2243 state->timestamp, state->est_timestamp);
2244 ptq->timestamp = state->est_timestamp;
2245 } else if (state->timestamp > ptq->timestamp) {
2246 ptq->timestamp = state->timestamp;
2249 if (ptq->sel_timestamp) {
2250 err = intel_pt_time_filter(ptq, &ff_timestamp);
2255 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2256 *timestamp = ptq->timestamp;
2263 static inline int intel_pt_update_queues(struct intel_pt *pt)
2265 if (pt->queues.new_data) {
2266 pt->queues.new_data = false;
2267 return intel_pt_setup_queues(pt);
2272 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2274 unsigned int queue_nr;
2279 struct auxtrace_queue *queue;
2280 struct intel_pt_queue *ptq;
2282 if (!pt->heap.heap_cnt)
2285 if (pt->heap.heap_array[0].ordinal >= timestamp)
2288 queue_nr = pt->heap.heap_array[0].queue_nr;
2289 queue = &pt->queues.queue_array[queue_nr];
2292 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2293 queue_nr, pt->heap.heap_array[0].ordinal,
2296 auxtrace_heap__pop(&pt->heap);
2298 if (pt->heap.heap_cnt) {
2299 ts = pt->heap.heap_array[0].ordinal + 1;
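/*
 * Decode this queue only to just past the next queue's timestamp, keeping
 * all queues in time order.
 */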
2306 intel_pt_set_pid_tid_cpu(pt, queue);
2308 ret = intel_pt_run_decoder(ptq, &ts);
2311 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2316 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2320 ptq->on_heap = false;
2327 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2330 struct auxtrace_queues *queues = &pt->queues;
2334 for (i = 0; i < queues->nr_queues; i++) {
2335 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2336 struct intel_pt_queue *ptq = queue->priv;
2338 if (ptq && (tid == -1 || ptq->tid == tid)) {
2340 intel_pt_set_pid_tid_cpu(pt, queue);
2341 intel_pt_run_decoder(ptq, &ts);
2347 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2348 struct auxtrace_queue *queue,
2349 struct perf_sample *sample)
2351 struct machine *m = ptq->pt->machine;
2353 ptq->pid = sample->pid;
2354 ptq->tid = sample->tid;
2355 ptq->cpu = queue->cpu;
2357 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2358 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2360 thread__zput(ptq->thread);
2365 if (ptq->pid == -1) {
2366 ptq->thread = machine__find_thread(m, -1, ptq->tid);
2368 ptq->pid = ptq->thread->pid_;
2372 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
2375 static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2376 struct perf_sample *sample)
2378 struct auxtrace_queue *queue;
2379 struct intel_pt_queue *ptq;
2382 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2391 ptq->time = sample->time;
2392 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2393 intel_pt_run_decoder(ptq, &ts);
2397 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2399 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2400 sample->pid, sample->tid, 0, sample->time);
2403 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2407 if (cpu < 0 || !pt->queues.nr_queues)
2410 if ((unsigned)cpu >= pt->queues.nr_queues)
2411 i = pt->queues.nr_queues - 1;
2415 if (pt->queues.queue_array[i].cpu == cpu)
2416 return pt->queues.queue_array[i].priv;
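/*
 * With per-cpu mmaps, queue i normally belongs to cpu i, so the matching
 * index was tried first; otherwise search the other queues.
 */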
2418 for (j = 0; i > 0; j++) {
2419 if (pt->queues.queue_array[--i].cpu == cpu)
2420 return pt->queues.queue_array[i].priv;
2423 for (; j < pt->queues.nr_queues; j++) {
2424 if (pt->queues.queue_array[j].cpu == cpu)
2425 return pt->queues.queue_array[j].priv;
2431 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2434 struct intel_pt_queue *ptq;
2437 if (!pt->sync_switch)
2440 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2441 if (!ptq || !ptq->sync_switch)
2444 switch (ptq->switch_state) {
2445 case INTEL_PT_SS_NOT_TRACING:
2447 case INTEL_PT_SS_UNKNOWN:
2448 case INTEL_PT_SS_TRACING:
2449 ptq->next_tid = tid;
2450 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2452 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2453 if (!ptq->on_heap) {
2454 ptq->timestamp = perf_time_to_tsc(timestamp,
2456 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2460 ptq->on_heap = true;
2462 ptq->switch_state = INTEL_PT_SS_TRACING;
2464 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2465 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2476 static int intel_pt_process_switch(struct intel_pt *pt,
2477 struct perf_sample *sample)
2479 struct evsel *evsel;
2483 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
2484 if (evsel != pt->switch_evsel)
2487 tid = evsel__intval(evsel, sample, "next_pid");
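/* The sched_switch tracepoint's "next_pid" field is actually the next task's tid */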
2490 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2491 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2494 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2498 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2501 static int intel_pt_context_switch_in(struct intel_pt *pt,
2502 struct perf_sample *sample)
2504 pid_t pid = sample->pid;
2505 pid_t tid = sample->tid;
2506 int cpu = sample->cpu;
2508 if (pt->sync_switch) {
2509 struct intel_pt_queue *ptq;
2511 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2512 if (ptq && ptq->sync_switch) {
2514 switch (ptq->switch_state) {
2515 case INTEL_PT_SS_NOT_TRACING:
2516 case INTEL_PT_SS_UNKNOWN:
2517 case INTEL_PT_SS_TRACING:
2519 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2520 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2521 ptq->switch_state = INTEL_PT_SS_TRACING;
2530 * If the current tid has not been updated yet, ensure it is now that
2531 * a "switch in" event has occurred.
2533 if (machine__get_current_tid(pt->machine, cpu) == tid)
2536 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2539 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2540 struct perf_sample *sample)
2542 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2548 if (pt->have_sched_switch == 3) {
2550 return intel_pt_context_switch_in(pt, sample);
2551 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2552 pr_err("Expecting CPU-wide context switch event\n");
2555 pid = event->context_switch.next_prev_pid;
2556 tid = event->context_switch.next_prev_tid;
2565 pr_err("context_switch event has no tid\n");
2569 intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2570 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
2573 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2577 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2580 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2581 union perf_event *event,
2582 struct perf_sample *sample)
2584 if (!pt->per_cpu_mmaps)
2587 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2588 sample->cpu, event->itrace_start.pid,
2589 event->itrace_start.tid, sample->time,
2590 perf_time_to_tsc(sample->time, &pt->tc));
2592 return machine__set_current_tid(pt->machine, sample->cpu,
2593 event->itrace_start.pid,
2594 event->itrace_start.tid);
2597 static int intel_pt_process_event(struct perf_session *session,
2598 union perf_event *event,
2599 struct perf_sample *sample,
2600 struct perf_tool *tool)
2602 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2610 if (!tool->ordered_events) {
2611 pr_err("Intel Processor Trace requires ordered events\n");
2615 if (sample->time && sample->time != (u64)-1)
2616 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2620 if (timestamp || pt->timeless_decoding) {
2621 err = intel_pt_update_queues(pt);
2626 if (pt->timeless_decoding) {
2627 if (pt->sampling_mode) {
2628 if (sample->aux_sample.size)
2629 err = intel_pt_process_timeless_sample(pt,
2631 } else if (event->header.type == PERF_RECORD_EXIT) {
2632 err = intel_pt_process_timeless_queues(pt,
2636 } else if (timestamp) {
2637 err = intel_pt_process_queues(pt, timestamp);
2642 if (event->header.type == PERF_RECORD_SAMPLE) {
2643 if (pt->synth_opts.add_callchain && !sample->callchain)
2644 intel_pt_add_callchain(pt, sample);
2645 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
2646 intel_pt_add_br_stack(pt, sample);
2649 if (event->header.type == PERF_RECORD_AUX &&
2650 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
2651 pt->synth_opts.errors) {
2652 err = intel_pt_lost(pt, sample);
2657 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
2658 err = intel_pt_process_switch(pt, sample);
2659 else if (event->header.type == PERF_RECORD_ITRACE_START)
2660 err = intel_pt_process_itrace_start(pt, event, sample);
2661 else if (event->header.type == PERF_RECORD_SWITCH ||
2662 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2663 err = intel_pt_context_switch(pt, event, sample);
2665 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2666 event->header.type, sample->cpu, sample->time, timestamp);
2667 intel_pt_log_event(event);
2672 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
2674 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2681 if (!tool->ordered_events)
2684 ret = intel_pt_update_queues(pt);
2688 if (pt->timeless_decoding)
2689 return intel_pt_process_timeless_queues(pt, -1,
2692 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
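/* Free the per-queue decoder state and then the auxtrace queues. */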
2695 static void intel_pt_free_events(struct perf_session *session)
2697 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2699 struct auxtrace_queues *queues = &pt->queues;
2702 for (i = 0; i < queues->nr_queues; i++) {
2703 intel_pt_free_queue(queues->queue_array[i].priv);
2704 queues->queue_array[i].priv = NULL;
2706 intel_pt_log_disable();
2707 auxtrace_queues__free(queues);
2710 static void intel_pt_free(struct perf_session *session)
2712 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2715 auxtrace_heap__free(&pt->heap);
2716 intel_pt_free_events(session);
2717 session->auxtrace = NULL;
2718 thread__put(pt->unknown_thread);
2719 addr_filters__exit(&pt->filts);
2722 zfree(&pt->time_ranges);
2726 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
2727 struct evsel *evsel)
2729 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2732 return evsel->core.attr.type == pt->pmu_type;
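/*
 * PERF_RECORD_AUXTRACE carries the trace data itself.  If the data was
 * not already queued from the file's auxtrace index, the buffer is added
 * to the queues here, keyed by its offset in the data file.
 */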
2735 static int intel_pt_process_auxtrace_event(struct perf_session *session,
2736 union perf_event *event,
2737 struct perf_tool *tool __maybe_unused)
2739 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2742 if (!pt->data_queued) {
2743 struct auxtrace_buffer *buffer;
2745 int fd = perf_data__fd(session->data);
2748 if (perf_data__is_pipe(session->data)) {
2751 data_offset = lseek(fd, 0, SEEK_CUR);
2752 if (data_offset == -1)
2756 err = auxtrace_queues__add_event(&pt->queues, session, event,
2757 data_offset, &buffer);
2761 /* Dump here now that we have copied a piped trace out of the pipe */
2763 if (auxtrace_buffer__get_data(buffer, fd)) {
2764 intel_pt_dump_event(pt, buffer->data,
2766 auxtrace_buffer__put_data(buffer);
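/*
 * queue_data callback used by auxtrace_queue_data(): AUX area samples are
 * queued by their timestamp (converted to TSC), whole AUX events by file
 * offset.
 */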
2774 static int intel_pt_queue_data(struct perf_session *session,
2775 struct perf_sample *sample,
2776 union perf_event *event, u64 data_offset)
2778 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2783 return auxtrace_queues__add_event(&pt->queues, session, event,
2787 if (sample->time && sample->time != (u64)-1)
2788 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2792 return auxtrace_queues__add_sample(&pt->queues, session, sample,
2793 data_offset, timestamp);
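/*
 * Synthetic event attributes are injected into the session by
 * synthesizing a PERF_RECORD_ATTR with perf_event__synthesize_attr() and
 * delivering it back to the session through a dummy tool.
 */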
2796 struct intel_pt_synth {
2797 struct perf_tool dummy_tool;
2798 struct perf_session *session;
2801 static int intel_pt_event_synth(struct perf_tool *tool,
2802 union perf_event *event,
2803 struct perf_sample *sample __maybe_unused,
2804 struct machine *machine __maybe_unused)
2806 struct intel_pt_synth *intel_pt_synth =
2807 container_of(tool, struct intel_pt_synth, dummy_tool);
2809 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
2813 static int intel_pt_synth_event(struct perf_session *session, const char *name,
2814 struct perf_event_attr *attr, u64 id)
2816 struct intel_pt_synth intel_pt_synth;
2819 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
2820 name, id, (u64)attr->sample_type);
2822 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
2823 intel_pt_synth.session = session;
2825 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
2826 &id, intel_pt_event_synth);
2828 pr_err("%s: failed to synthesize '%s' event type\n",
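/* Rename the evsel whose first sample id matches the synthesized id. */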
2834 static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
2837 struct evsel *evsel;
2839 evlist__for_each_entry(evlist, evsel) {
2840 if (evsel->core.id && evsel->core.id[0] == id) {
2842 zfree(&evsel->name);
2843 evsel->name = strdup(name);
2849 static struct evsel *intel_pt_evsel(struct intel_pt *pt,
2850 struct evlist *evlist)
2852 struct evsel *evsel;
2854 evlist__for_each_entry(evlist, evsel) {
2855 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
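/*
 * Create the synthetic event types requested by the itrace options.  The
 * attribute is derived from the Intel PT evsel's sample_type, ids are
 * allocated starting at that evsel's first id + 1000000000, and the
 * resulting ids and sample types are cached in struct intel_pt for use
 * when samples are synthesized during decoding.
 */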
2862 static int intel_pt_synth_events(struct intel_pt *pt,
2863 struct perf_session *session)
2865 struct evlist *evlist = session->evlist;
2866 struct evsel *evsel = intel_pt_evsel(pt, evlist);
2867 struct perf_event_attr attr;
2872 pr_debug("There are no selected events with Intel Processor Trace data\n");
2876 memset(&attr, 0, sizeof(struct perf_event_attr));
2877 attr.size = sizeof(struct perf_event_attr);
2878 attr.type = PERF_TYPE_HARDWARE;
2879 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
2880 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
2882 if (pt->timeless_decoding)
2883 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
2885 attr.sample_type |= PERF_SAMPLE_TIME;
2886 if (!pt->per_cpu_mmaps)
2887 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
2888 attr.exclude_user = evsel->core.attr.exclude_user;
2889 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
2890 attr.exclude_hv = evsel->core.attr.exclude_hv;
2891 attr.exclude_host = evsel->core.attr.exclude_host;
2892 attr.exclude_guest = evsel->core.attr.exclude_guest;
2893 attr.sample_id_all = evsel->core.attr.sample_id_all;
2894 attr.read_format = evsel->core.attr.read_format;
2896 id = evsel->core.id[0] + 1000000000;
2900 if (pt->synth_opts.branches) {
2901 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
2902 attr.sample_period = 1;
2903 attr.sample_type |= PERF_SAMPLE_ADDR;
2904 err = intel_pt_synth_event(session, "branches", &attr, id);
2907 pt->sample_branches = true;
2908 pt->branches_sample_type = attr.sample_type;
2909 pt->branches_id = id;
2911 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
2914 if (pt->synth_opts.callchain)
2915 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
2916 if (pt->synth_opts.last_branch)
2917 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
2919 if (pt->synth_opts.instructions) {
2920 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2921 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
2922 attr.sample_period =
2923 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
2925 attr.sample_period = pt->synth_opts.period;
2926 err = intel_pt_synth_event(session, "instructions", &attr, id);
2929 pt->sample_instructions = true;
2930 pt->instructions_sample_type = attr.sample_type;
2931 pt->instructions_id = id;
2935 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
2936 attr.sample_period = 1;
2938 if (pt->synth_opts.transactions) {
2939 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2940 err = intel_pt_synth_event(session, "transactions", &attr, id);
2943 pt->sample_transactions = true;
2944 pt->transactions_sample_type = attr.sample_type;
2945 pt->transactions_id = id;
2946 intel_pt_set_event_name(evlist, id, "transactions");
2950 attr.type = PERF_TYPE_SYNTH;
2951 attr.sample_type |= PERF_SAMPLE_RAW;
2953 if (pt->synth_opts.ptwrites) {
2954 attr.config = PERF_SYNTH_INTEL_PTWRITE;
2955 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
2958 pt->sample_ptwrites = true;
2959 pt->ptwrites_sample_type = attr.sample_type;
2960 pt->ptwrites_id = id;
2961 intel_pt_set_event_name(evlist, id, "ptwrite");
2965 if (pt->synth_opts.pwr_events) {
2966 pt->sample_pwr_events = true;
2967 pt->pwr_events_sample_type = attr.sample_type;
2969 attr.config = PERF_SYNTH_INTEL_CBR;
2970 err = intel_pt_synth_event(session, "cbr", &attr, id);
2974 intel_pt_set_event_name(evlist, id, "cbr");
2978 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
2979 attr.config = PERF_SYNTH_INTEL_MWAIT;
2980 err = intel_pt_synth_event(session, "mwait", &attr, id);
2984 intel_pt_set_event_name(evlist, id, "mwait");
2987 attr.config = PERF_SYNTH_INTEL_PWRE;
2988 err = intel_pt_synth_event(session, "pwre", &attr, id);
2992 intel_pt_set_event_name(evlist, id, "pwre");
2995 attr.config = PERF_SYNTH_INTEL_EXSTOP;
2996 err = intel_pt_synth_event(session, "exstop", &attr, id);
3000 intel_pt_set_event_name(evlist, id, "exstop");
3003 attr.config = PERF_SYNTH_INTEL_PWRX;
3004 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3008 intel_pt_set_event_name(evlist, id, "pwrx");
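/*
 * PEBS via Intel PT: an evsel recorded with attr.aux_output has its PEBS
 * records embedded in the PT trace, so remember it for synthesizing those
 * samples.
 */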
3015 static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3017 struct evsel *evsel;
3019 if (!pt->synth_opts.other_events)
3022 evlist__for_each_entry(pt->session->evlist, evsel) {
3023 if (evsel->core.attr.aux_output && evsel->core.id) {
3024 pt->sample_pebs = true;
3025 pt->pebs_evsel = evsel;
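/*
 * Find the sched:sched_switch tracepoint evsel - the legacy way of
 * following context switches (have_sched_switch == 1).
 */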
3031 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3033 struct evsel *evsel;
3035 evlist__for_each_entry_reverse(evlist, evsel) {
3036 const char *name = evsel__name(evsel);
3038 if (!strcmp(name, "sched:sched_switch"))
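/* Check whether any evsel was recorded with attr.context_switch set. */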
3045 static bool intel_pt_find_switch(struct evlist *evlist)
3047 struct evsel *evsel;
3049 evlist__for_each_entry(evlist, evsel) {
3050 if (evsel->core.attr.context_switch)
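/*
 * perf config callback: only intel-pt.mispred-all is recognized here;
 * when set, synthesized branch entries are marked as mispredicted.
 */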
3057 static int intel_pt_perf_config(const char *var, const char *value, void *data)
3059 struct intel_pt *pt = data;
3061 if (!strcmp(var, "intel-pt.mispred-all"))
3062 pt->mispred_all = perf_config_bool(var, value);
3067 /* Find the smallest TSC that converts to a perf time of ns or later */
3068 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3072 tsc = perf_time_to_tsc(ns, &pt->tc);
3075 tm = tsc_to_perf_time(tsc, &pt->tc);
3082 tm = tsc_to_perf_time(++tsc, &pt->tc);
3087 /* Find the largest TSC that converts to a perf time of ns or earlier */
3088 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3092 tsc = perf_time_to_tsc(ns, &pt->tc);
3095 tm = tsc_to_perf_time(tsc, &pt->tc);
3102 tm = tsc_to_perf_time(--tsc, &pt->tc);
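/*
 * Illustrative example for the two helpers above: if ns = 1000 converts
 * to tsc = 2499 but tsc = 2499 converts back to 999ns, intel_pt_tsc_start()
 * steps tsc up until it converts to 1000ns or later, while
 * intel_pt_tsc_end() steps it down until it converts to the end time or
 * earlier.  The numbers are made up; only the rounding direction matters.
 */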
3107 static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3108 struct itrace_synth_opts *opts)
3110 struct perf_time_interval *p = opts->ptime_range;
3111 int n = opts->range_num;
3114 if (!n || !p || pt->timeless_decoding)
3117 pt->time_ranges = calloc(n, sizeof(struct range));
3118 if (!pt->time_ranges)
3123 intel_pt_log("%s: %u range(s)\n", __func__, n);
3125 for (i = 0; i < n; i++) {
3126 struct range *r = &pt->time_ranges[i];
3127 u64 ts = p[i].start;
3131 * Take care to ensure the TSC range matches the perf-time range
3132 * when converted back to perf-time.
3134 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3135 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3137 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3139 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3140 i, r->start, r->end);
3146 static const char * const intel_pt_info_fmts[] = {
3147 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3148 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3149 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3150 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3151 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3152 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3153 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3154 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3155 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3156 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3157 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3158 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3159 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3160 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3161 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3162 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3165 static void intel_pt_print_info(__u64 *arr, int start, int finish)
3172 for (i = start; i <= finish; i++)
3173 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3176 static void intel_pt_print_info_str(const char *name, const char *str)
3181 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
3184 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3186 return auxtrace_info->header.size >=
3187 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
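/*
 * Entry point for the PERF_RECORD_AUXTRACE_INFO event: validate the
 * recorded parameters, allocate and set up struct intel_pt, hook the
 * auxtrace callbacks into the session, and queue the trace data for
 * decoding.
 */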
3190 int intel_pt_process_auxtrace_info(union perf_event *event,
3191 struct perf_session *session)
3193 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3194 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3195 struct intel_pt *pt;
3200 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3204 pt = zalloc(sizeof(struct intel_pt));
3208 addr_filters__init(&pt->filts);
3210 err = perf_config(intel_pt_perf_config, pt);
3214 err = auxtrace_queues__init(&pt->queues);
3218 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3220 pt->session = session;
3221 pt->machine = &session->machines.host; /* No kvm support */
3222 pt->auxtrace_type = auxtrace_info->type;
3223 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3224 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3225 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3226 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3227 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3228 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3229 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3230 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3231 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3232 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3233 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3234 INTEL_PT_PER_CPU_MMAPS);
3236 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3237 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3238 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3239 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3240 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3241 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3242 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3246 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3247 pt->max_non_turbo_ratio =
3248 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3249 intel_pt_print_info(&auxtrace_info->priv[0],
3250 INTEL_PT_MAX_NONTURBO_RATIO,
3251 INTEL_PT_MAX_NONTURBO_RATIO);
3254 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3255 info_end = (void *)info + auxtrace_info->header.size;
3257 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3260 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3261 intel_pt_print_info(&auxtrace_info->priv[0],
3262 INTEL_PT_FILTER_STR_LEN,
3263 INTEL_PT_FILTER_STR_LEN);
3265 const char *filter = (const char *)info;
3267 len = roundup(len + 1, 8);
3269 if ((void *)info > info_end) {
3270 pr_err("%s: bad filter string length\n", __func__);
3272 goto err_free_queues;
3274 pt->filter = memdup(filter, len);
3277 goto err_free_queues;
3279 if (session->header.needs_swap)
3280 mem_bswap_64(pt->filter, len);
3281 if (pt->filter[len - 1]) {
3282 pr_err("%s: filter string not null terminated\n", __func__);
3284 goto err_free_queues;
3286 err = addr_filters__parse_bare_filter(&pt->filts,
3289 goto err_free_queues;
3291 intel_pt_print_info_str("Filter string", pt->filter);
3294 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3295 if (pt->timeless_decoding && !pt->tc.time_mult)
3296 pt->tc.time_mult = 1;
3297 pt->have_tsc = intel_pt_have_tsc(pt);
3298 pt->sampling_mode = intel_pt_sampling_mode(pt);
3299 pt->est_tsc = !pt->timeless_decoding;
3301 pt->unknown_thread = thread__new(999999999, 999999999);
3302 if (!pt->unknown_thread) {
3304 goto err_free_queues;
3308 * Since this thread will not be kept in any rbtree nor in a
3309 * list, initialize its list node so that at thread__put() the
3310 * current thread lifetime assumption is kept and we don't
3311 * segfault at list_del_init().
3313 INIT_LIST_HEAD(&pt->unknown_thread->node);
3315 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3317 goto err_delete_thread;
3318 if (thread__init_maps(pt->unknown_thread, pt->machine)) {
3320 goto err_delete_thread;
3323 pt->auxtrace.process_event = intel_pt_process_event;
3324 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3325 pt->auxtrace.queue_data = intel_pt_queue_data;
3326 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
3327 pt->auxtrace.flush_events = intel_pt_flush;
3328 pt->auxtrace.free_events = intel_pt_free_events;
3329 pt->auxtrace.free = intel_pt_free;
3330 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
3331 session->auxtrace = &pt->auxtrace;
3336 if (pt->have_sched_switch == 1) {
3337 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3338 if (!pt->switch_evsel) {
3339 pr_err("%s: missing sched_switch event\n", __func__);
3341 goto err_delete_thread;
3343 } else if (pt->have_sched_switch == 2 &&
3344 !intel_pt_find_switch(session->evlist)) {
3345 pr_err("%s: missing context_switch attribute flag\n", __func__);
3347 goto err_delete_thread;
3350 if (session->itrace_synth_opts->set) {
3351 pt->synth_opts = *session->itrace_synth_opts;
3353 itrace_synth_opts__set_default(&pt->synth_opts,
3354 session->itrace_synth_opts->default_no_sample);
3355 if (!session->itrace_synth_opts->default_no_sample &&
3356 !session->itrace_synth_opts->inject) {
3357 pt->synth_opts.branches = false;
3358 pt->synth_opts.callchain = true;
3359 pt->synth_opts.add_callchain = true;
3361 pt->synth_opts.thread_stack =
3362 session->itrace_synth_opts->thread_stack;
3365 if (pt->synth_opts.log)
3366 intel_pt_log_enable();
3368 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
3369 if (pt->tc.time_mult) {
3370 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3372 if (!pt->max_non_turbo_ratio)
3373 pt->max_non_turbo_ratio =
3374 (tsc_freq + 50000000) / 100000000;
3375 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3376 intel_pt_log("Maximum non-turbo ratio %u\n",
3377 pt->max_non_turbo_ratio);
3378 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
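/*
 * Illustrative arithmetic (made-up frequency): with a 2.4 GHz TSC,
 * max_non_turbo_ratio = (2400000000 + 50000000) / 100000000 = 24 and
 * cbr2khz = 2400000000 / 24 / 1000 = 100000, i.e. one CBR unit
 * corresponds to 100 MHz.
 */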
3381 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3383 goto err_delete_thread;
3385 if (pt->synth_opts.calls)
3386 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3387 PERF_IP_FLAG_TRACE_END;
3388 if (pt->synth_opts.returns)
3389 pt->branches_filter |= PERF_IP_FLAG_RETURN |
3390 PERF_IP_FLAG_TRACE_BEGIN;
3392 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
3393 !symbol_conf.use_callchain) {
3394 symbol_conf.use_callchain = true;
3395 if (callchain_register_param(&callchain_param) < 0) {
3396 symbol_conf.use_callchain = false;
3397 pt->synth_opts.callchain = false;
3398 pt->synth_opts.add_callchain = false;
3402 if (pt->synth_opts.add_callchain) {
3403 err = intel_pt_callchain_init(pt);
3405 goto err_delete_thread;
3408 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
3409 pt->br_stack_sz = pt->synth_opts.last_branch_sz;
3410 pt->br_stack_sz_plus = pt->br_stack_sz;
3413 if (pt->synth_opts.add_last_branch) {
3414 err = intel_pt_br_stack_init(pt);
3416 goto err_delete_thread;
3418 * Additional branch stack size to cater for tracing from the
3419 * actual sample ip to where the sample time is recorded.
3420 * Measured at about 200 branches, but generously set to 1024.
3421 * If kernel space is not being traced, then add just 1 for the
3422 * branch to kernel space.
3424 if (intel_pt_tracing_kernel(pt))
3425 pt->br_stack_sz_plus += 1024;
3427 pt->br_stack_sz_plus += 1;
3430 pt->use_thread_stack = pt->synth_opts.callchain ||
3431 pt->synth_opts.add_callchain ||
3432 pt->synth_opts.thread_stack ||
3433 pt->synth_opts.last_branch ||
3434 pt->synth_opts.add_last_branch;
3436 pt->callstack = pt->synth_opts.callchain ||
3437 pt->synth_opts.add_callchain ||
3438 pt->synth_opts.thread_stack;
3440 err = intel_pt_synth_events(pt, session);
3442 goto err_delete_thread;
3444 intel_pt_setup_pebs_events(pt);
3446 if (pt->sampling_mode || list_empty(&session->auxtrace_index))
3447 err = auxtrace_queue_data(session, true, true);
3449 err = auxtrace_queues__process_index(&pt->queues, session);
3451 goto err_delete_thread;
3453 if (pt->queues.populated)
3454 pt->data_queued = true;
3456 if (pt->timeless_decoding)
3457 pr_debug2("Intel PT decoding without timestamps\n");
3463 thread__zput(pt->unknown_thread);
3465 intel_pt_log_disable();
3466 auxtrace_queues__free(&pt->queues);
3467 session->auxtrace = NULL;
3469 addr_filters__exit(&pt->filts);
3471 zfree(&pt->time_ranges);