1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * intel_pt.c: Intel Processor Trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
5 */
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/zalloc.h>
27 #include "thread-stack.h"
29 #include "callchain.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "time-utils.h"
40 #include "../arch/x86/include/uapi/asm/perf_regs.h"
42 #include "intel-pt-decoder/intel-pt-log.h"
43 #include "intel-pt-decoder/intel-pt-decoder.h"
44 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
45 #include "intel-pt-decoder/intel-pt-pkt-decoder.h"
47 #define MAX_TIMESTAMP (~0ULL)

54 struct intel_pt {
55 struct auxtrace auxtrace;
56 struct auxtrace_queues queues;
57 struct auxtrace_heap heap;
59 struct perf_session *session;
60 struct machine *machine;
61 struct evsel *switch_evsel;
62 struct thread *unknown_thread;
63 bool timeless_decoding;
72 bool use_thread_stack;
74 unsigned int br_stack_sz;
75 unsigned int br_stack_sz_plus;
76 int have_sched_switch;
82 struct perf_tsc_conversion tc;
83 bool cap_user_time_zero;
85 struct itrace_synth_opts synth_opts;
87 bool sample_instructions;
88 u64 instructions_sample_type;
93 u64 branches_sample_type;
96 bool sample_transactions;
97 u64 transactions_sample_type;
100 bool sample_ptwrites;
101 u64 ptwrites_sample_type;
104 bool sample_pwr_events;
105 u64 pwr_events_sample_type;
113 struct evsel *pebs_evsel;
122 unsigned max_non_turbo_ratio;
125 unsigned long num_events;
128 struct addr_filters filts;
130 struct range *time_ranges;
131 unsigned int range_cnt;
133 struct ip_callchain *chain;
134 struct branch_stack *br_stack;
135 };

137 enum switch_state {
138 INTEL_PT_SS_NOT_TRACING,
141 INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
142 INTEL_PT_SS_EXPECTING_SWITCH_IP,
143 };
145 struct intel_pt_queue {
147 unsigned int queue_nr;
148 struct auxtrace_buffer *buffer;
149 struct auxtrace_buffer *old_buffer;
151 const struct intel_pt_state *state;
152 struct ip_callchain *chain;
153 struct branch_stack *last_branch;
154 union perf_event *event_buf;
157 bool step_through_buffers;
158 bool use_buffer_pid_tid;
164 struct thread *thread;
171 unsigned int sel_idx;
177 u64 last_in_insn_cnt;
179 u64 last_br_insn_cnt;
181 unsigned int cbr_seen;
182 char insn[INTEL_PT_INSN_BUF_SZ];
185 static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
186 unsigned char *buf, size_t len)
188 struct intel_pt_pkt packet;
191 char desc[INTEL_PT_PKT_DESC_MAX];
192 const char *color = PERF_COLOR_BLUE;
193 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
195 color_fprintf(stdout, color,
196 ". ... Intel Processor Trace data: size %zu bytes\n",
200 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
206 color_fprintf(stdout, color, " %08x: ", pos);
207 for (i = 0; i < pkt_len; i++)
208 color_fprintf(stdout, color, " %02x", buf[i]);
210 color_fprintf(stdout, color, " ");
212 ret = intel_pt_pkt_desc(&packet, desc,
213 INTEL_PT_PKT_DESC_MAX);
215 color_fprintf(stdout, color, " %s\n", desc);
217 color_fprintf(stdout, color, " Bad packet!\n");
225 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
229 intel_pt_dump(pt, buf, len);
232 static void intel_pt_log_event(union perf_event *event)
234 FILE *f = intel_pt_log_fp();
236 if (!intel_pt_enable_logging || !f)
239 perf_event__fprintf(event, NULL, f);
242 static void intel_pt_dump_sample(struct perf_session *session,
243 struct perf_sample *sample)
245 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
249 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
252 static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
254 struct perf_time_interval *range = pt->synth_opts.ptime_range;
255 int n = pt->synth_opts.range_num;
257 if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
260 if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
263 /* perf_time__ranges_skip_sample does not work if time is zero */
267 return !n || !perf_time__ranges_skip_sample(range, n, tm);
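/*
 * Trim overlapping trace data: in snapshot or sample mode, buffer 'b' may
 * repeat data already present at the end of buffer 'a', so adjust 'b' to
 * start after the overlap and note whether the two buffers are consecutive.
 */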
270 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
271 struct auxtrace_buffer *b)
273 bool consecutive = false;
276 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
277 pt->have_tsc, &consecutive);
280 b->use_size = b->data + b->size - start;
282 if (b->use_size && consecutive)
283 b->consecutive = true;
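/*
 * Describe an auxtrace buffer to the decoder: map its data, fix up any
 * overlap with the previous buffer, and report a new 'trace_nr' when the
 * data is not consecutive so the decoder treats it as a new trace segment.
 */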
287 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
288 struct auxtrace_buffer *buffer,
289 struct auxtrace_buffer *old_buffer,
290 struct intel_pt_buffer *b)
295 int fd = perf_data__fd(ptq->pt->session->data);
297 buffer->data = auxtrace_buffer__get_data(buffer, fd);
302 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
303 if (might_overlap && !buffer->consecutive && old_buffer &&
304 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
307 if (buffer->use_data) {
308 b->len = buffer->use_size;
309 b->buf = buffer->use_data;
311 b->len = buffer->size;
312 b->buf = buffer->data;
314 b->ref_timestamp = buffer->reference;
316 if (!old_buffer || (might_overlap && !buffer->consecutive)) {
317 b->consecutive = false;
318 b->trace_nr = buffer->buffer_nr + 1;
320 b->consecutive = true;
326 /* Do not drop buffers with references - see intel_pt_get_trace() */
327 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
328 struct auxtrace_buffer *buffer)
330 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
333 auxtrace_buffer__drop_data(buffer);
336 /* Must be serialized with respect to intel_pt_get_trace() */
337 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
340 struct intel_pt_queue *ptq = data;
341 struct auxtrace_buffer *buffer = ptq->buffer;
342 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
343 struct auxtrace_queue *queue;
346 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
349 struct intel_pt_buffer b = { .len = 0 };
351 buffer = auxtrace_buffer__next(queue, buffer);
355 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
360 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
363 intel_pt_lookahead_drop_buffer(ptq, buffer);
367 err = cb(&b, cb_data);
372 if (buffer != old_buffer)
373 intel_pt_lookahead_drop_buffer(ptq, buffer);
374 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
379 /*
380 * This function assumes data is processed sequentially only.
381 * Must be serialized with respect to intel_pt_lookahead()
382 */
383 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
385 struct intel_pt_queue *ptq = data;
386 struct auxtrace_buffer *buffer = ptq->buffer;
387 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
388 struct auxtrace_queue *queue;
396 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
398 buffer = auxtrace_buffer__next(queue, buffer);
401 auxtrace_buffer__drop_data(old_buffer);
406 ptq->buffer = buffer;
408 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
412 if (ptq->step_through_buffers)
417 auxtrace_buffer__drop_data(old_buffer);
418 ptq->old_buffer = buffer;
420 auxtrace_buffer__drop_data(buffer);
421 return intel_pt_get_trace(b, data);
427 struct intel_pt_cache_entry {
428 struct auxtrace_cache_entry entry;
431 enum intel_pt_insn_op op;
432 enum intel_pt_insn_branch branch;
435 char insn[INTEL_PT_INSN_BUF_SZ];
438 static int intel_pt_config_div(const char *var, const char *value, void *data)
443 if (!strcmp(var, "intel-pt.cache-divisor")) {
444 val = strtol(value, NULL, 0);
445 if (val > 0 && val <= INT_MAX)
452 static int intel_pt_cache_divisor(void)
459 perf_config(intel_pt_config_div, &d);
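/*
 * Size of the per-dso instruction cache, in hash bits: proportional to the
 * log2 of the dso's file size divided by the configurable
 * 'intel-pt.cache-divisor'.
 */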
467 static unsigned int intel_pt_cache_size(struct dso *dso,
468 struct machine *machine)
472 size = dso__data_size(dso, machine);
473 size /= intel_pt_cache_divisor();
476 if (size > (1 << 21))
478 return 32 - __builtin_clz(size);
481 static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
482 struct machine *machine)
484 struct auxtrace_cache *c;
487 if (dso->auxtrace_cache)
488 return dso->auxtrace_cache;
490 bits = intel_pt_cache_size(dso, machine);
492 /* Ignoring cache creation failure */
493 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
495 dso->auxtrace_cache = c;
500 static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
501 u64 offset, u64 insn_cnt, u64 byte_cnt,
502 struct intel_pt_insn *intel_pt_insn)
504 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
505 struct intel_pt_cache_entry *e;
511 e = auxtrace_cache__alloc_entry(c);
515 e->insn_cnt = insn_cnt;
516 e->byte_cnt = byte_cnt;
517 e->op = intel_pt_insn->op;
518 e->branch = intel_pt_insn->branch;
519 e->length = intel_pt_insn->length;
520 e->rel = intel_pt_insn->rel;
521 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
523 err = auxtrace_cache__add(c, offset, &e->entry);
525 auxtrace_cache__free_entry(c, e);
530 static struct intel_pt_cache_entry *
531 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
533 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
538 return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
541 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
544 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
549 auxtrace_cache__remove(dso->auxtrace_cache, offset);
552 static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
554 return ip >= pt->kernel_start ?
555 PERF_RECORD_MISC_KERNEL :
556 PERF_RECORD_MISC_USER;
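/*
 * Decoder callback used to walk instructions forward from *ip, reading the
 * instruction bytes from the dso that backs the thread's memory map. The
 * walk stops at a branch, when 'to_ip' is reached, or after 'max_insn_cnt'
 * instructions. Walks are cached per dso (see intel_pt_cache_add()) so the
 * same code does not have to be decoded repeatedly.
 */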
559 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
560 uint64_t *insn_cnt_ptr, uint64_t *ip,
561 uint64_t to_ip, uint64_t max_insn_cnt,
564 struct intel_pt_queue *ptq = data;
565 struct machine *machine = ptq->pt->machine;
566 struct thread *thread;
567 struct addr_location al;
568 unsigned char buf[INTEL_PT_INSN_BUF_SZ];
572 u64 offset, start_offset, start_ip;
576 intel_pt_insn->length = 0;
578 if (to_ip && *ip == to_ip)
581 cpumode = intel_pt_cpumode(ptq->pt, *ip);
583 thread = ptq->thread;
585 if (cpumode != PERF_RECORD_MISC_KERNEL)
587 thread = ptq->pt->unknown_thread;
591 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
594 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
595 dso__data_status_seen(al.map->dso,
596 DSO_DATA_STATUS_SEEN_ITRACE))
599 offset = al.map->map_ip(al.map, *ip);
601 if (!to_ip && one_map) {
602 struct intel_pt_cache_entry *e;
604 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
606 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
607 *insn_cnt_ptr = e->insn_cnt;
609 intel_pt_insn->op = e->op;
610 intel_pt_insn->branch = e->branch;
611 intel_pt_insn->length = e->length;
612 intel_pt_insn->rel = e->rel;
613 memcpy(intel_pt_insn->buf, e->insn,
614 INTEL_PT_INSN_BUF_SZ);
615 intel_pt_log_insn_no_data(intel_pt_insn, *ip);
620 start_offset = offset;
623 /* Load maps to ensure dso->is_64_bit has been updated */
626 x86_64 = al.map->dso->is_64_bit;
629 len = dso__data_read_offset(al.map->dso, machine,
631 INTEL_PT_INSN_BUF_SZ);
635 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
638 intel_pt_log_insn(intel_pt_insn, *ip);
642 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
645 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
648 *ip += intel_pt_insn->length;
650 if (to_ip && *ip == to_ip)
653 if (*ip >= al.map->end)
656 offset += intel_pt_insn->length;
661 *insn_cnt_ptr = insn_cnt;
666 /*
667 * Didn't look up in the 'to_ip' case, so do it now to prevent duplicate
668 * cache entries.
669 */
670 if (to_ip) {
671 struct intel_pt_cache_entry *e;
673 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
678 /* Ignore cache errors */
679 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
680 *ip - start_ip, intel_pt_insn);
685 *insn_cnt_ptr = insn_cnt;
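/*
 * Address filter handling for TIP.PGD packets: return true if the packet's
 * target address hit a trace-stop filter, or if address filters are in use
 * and the address is not inside any of them.
 */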
689 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
690 uint64_t offset, const char *filename)
692 struct addr_filter *filt;
693 bool have_filter = false;
694 bool hit_tracestop = false;
695 bool hit_filter = false;
697 list_for_each_entry(filt, &pt->filts.head, list) {
701 if ((filename && !filt->filename) ||
702 (!filename && filt->filename) ||
703 (filename && strcmp(filename, filt->filename)))
706 if (!(offset >= filt->addr && offset < filt->addr + filt->size))
709 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
710 ip, offset, filename ? filename : "[kernel]",
711 filt->start ? "filter" : "stop",
712 filt->addr, filt->size);
717 hit_tracestop = true;
720 if (!hit_tracestop && !hit_filter)
721 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
722 ip, offset, filename ? filename : "[kernel]");
724 return hit_tracestop || (have_filter && !hit_filter);
727 static int __intel_pt_pgd_ip(uint64_t ip, void *data)
729 struct intel_pt_queue *ptq = data;
730 struct thread *thread;
731 struct addr_location al;
735 if (ip >= ptq->pt->kernel_start)
736 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
738 cpumode = PERF_RECORD_MISC_USER;
740 thread = ptq->thread;
744 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
747 offset = al.map->map_ip(al.map, ip);
749 return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
750 al.map->dso->long_name);
753 static bool intel_pt_pgd_ip(uint64_t ip, void *data)
755 return __intel_pt_pgd_ip(ip, data) > 0;
758 static bool intel_pt_get_config(struct intel_pt *pt,
759 struct perf_event_attr *attr, u64 *config)
761 if (attr->type == pt->pmu_type) {
763 *config = attr->config;
770 static bool intel_pt_exclude_kernel(struct intel_pt *pt)
774 evlist__for_each_entry(pt->session->evlist, evsel) {
775 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
776 !evsel->core.attr.exclude_kernel)
782 static bool intel_pt_return_compression(struct intel_pt *pt)
787 if (!pt->noretcomp_bit)
790 evlist__for_each_entry(pt->session->evlist, evsel) {
791 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
792 (config & pt->noretcomp_bit))
798 static bool intel_pt_branch_enable(struct intel_pt *pt)
803 evlist__for_each_entry(pt->session->evlist, evsel) {
804 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
805 (config & 1) && !(config & 0x2000))
811 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
817 if (!pt->mtc_freq_bits)
820 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
823 evlist__for_each_entry(pt->session->evlist, evsel) {
824 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
825 return (config & pt->mtc_freq_bits) >> shift;
830 static bool intel_pt_timeless_decoding(struct intel_pt *pt)
833 bool timeless_decoding = true;
836 if (!pt->tsc_bit || !pt->cap_user_time_zero)
839 evlist__for_each_entry(pt->session->evlist, evsel) {
840 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
842 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
843 if (config & pt->tsc_bit)
844 timeless_decoding = false;
849 return timeless_decoding;
852 static bool intel_pt_tracing_kernel(struct intel_pt *pt)
856 evlist__for_each_entry(pt->session->evlist, evsel) {
857 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
858 !evsel->core.attr.exclude_kernel)
864 static bool intel_pt_have_tsc(struct intel_pt *pt)
867 bool have_tsc = false;
873 evlist__for_each_entry(pt->session->evlist, evsel) {
874 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
875 if (config & pt->tsc_bit)
884 static bool intel_pt_sampling_mode(struct intel_pt *pt)
888 evlist__for_each_entry(pt->session->evlist, evsel) {
889 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
890 evsel->core.attr.aux_sample_size)
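/*
 * Convert nanoseconds to TSC ticks by inverting the kernel's TSC-to-time
 * conversion parameters, i.e. roughly ticks = (ns << time_shift) / time_mult.
 * The quotient and remainder are scaled separately to avoid overflowing
 * 64 bits.
 */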
896 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
900 quot = ns / pt->tc.time_mult;
901 rem = ns % pt->tc.time_mult;
902 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
906 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
908 size_t sz = sizeof(struct ip_callchain);
910 /* Add 1 to callchain_sz for callchain context */
911 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
915 static int intel_pt_callchain_init(struct intel_pt *pt)
919 evlist__for_each_entry(pt->session->evlist, evsel) {
920 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
921 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
924 pt->chain = intel_pt_alloc_chain(pt);
931 static void intel_pt_add_callchain(struct intel_pt *pt,
932 struct perf_sample *sample)
934 struct thread *thread = machine__findnew_thread(pt->machine,
938 thread_stack__sample_late(thread, sample->cpu, pt->chain,
939 pt->synth_opts.callchain_sz + 1, sample->ip,
942 sample->callchain = pt->chain;
945 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
947 size_t sz = sizeof(struct branch_stack);
949 sz += entry_cnt * sizeof(struct branch_entry);
953 static int intel_pt_br_stack_init(struct intel_pt *pt)
957 evlist__for_each_entry(pt->session->evlist, evsel) {
958 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
959 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
962 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
969 static void intel_pt_add_br_stack(struct intel_pt *pt,
970 struct perf_sample *sample)
972 struct thread *thread = machine__findnew_thread(pt->machine,
976 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
977 pt->br_stack_sz, sample->ip,
980 sample->branch_stack = pt->br_stack;
983 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
984 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
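/*
 * Allocate a decode queue: per-queue sample buffers (callchain, branch
 * stack, event buffer) plus a decoder instance configured from the recorded
 * PMU settings and the itrace synthesis options (period, callchains,
 * branches).
 */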
986 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
987 unsigned int queue_nr)
989 struct intel_pt_params params = { .get_trace = 0, };
990 struct perf_env *env = pt->machine->env;
991 struct intel_pt_queue *ptq;
993 ptq = zalloc(sizeof(struct intel_pt_queue));
997 if (pt->synth_opts.callchain) {
998 ptq->chain = intel_pt_alloc_chain(pt);
1003 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
1004 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
1006 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1007 if (!ptq->last_branch)
1011 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1012 if (!ptq->event_buf)
1016 ptq->queue_nr = queue_nr;
1017 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1023 params.get_trace = intel_pt_get_trace;
1024 params.walk_insn = intel_pt_walk_next_insn;
1025 params.lookahead = intel_pt_lookahead;
1027 params.return_compression = intel_pt_return_compression(pt);
1028 params.branch_enable = intel_pt_branch_enable(pt);
1029 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
1030 params.mtc_period = intel_pt_mtc_period(pt);
1031 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1032 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
1034 if (pt->filts.cnt > 0)
1035 params.pgd_ip = intel_pt_pgd_ip;
1037 if (pt->synth_opts.instructions) {
1038 if (pt->synth_opts.period) {
1039 switch (pt->synth_opts.period_type) {
1040 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1041 params.period_type =
1042 INTEL_PT_PERIOD_INSTRUCTIONS;
1043 params.period = pt->synth_opts.period;
1045 case PERF_ITRACE_PERIOD_TICKS:
1046 params.period_type = INTEL_PT_PERIOD_TICKS;
1047 params.period = pt->synth_opts.period;
1049 case PERF_ITRACE_PERIOD_NANOSECS:
1050 params.period_type = INTEL_PT_PERIOD_TICKS;
1051 params.period = intel_pt_ns_to_ticks(pt,
1052 pt->synth_opts.period);
1059 if (!params.period) {
1060 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
1065 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1066 params.flags |= INTEL_PT_FUP_WITH_NLIP;
1068 ptq->decoder = intel_pt_decoder_new(¶ms);
1075 zfree(&ptq->event_buf);
1076 zfree(&ptq->last_branch);
1082 static void intel_pt_free_queue(void *priv)
1084 struct intel_pt_queue *ptq = priv;
1088 thread__zput(ptq->thread);
1089 intel_pt_decoder_free(ptq->decoder);
1090 zfree(&ptq->event_buf);
1091 zfree(&ptq->last_branch);
1096 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1097 struct auxtrace_queue *queue)
1099 struct intel_pt_queue *ptq = queue->priv;
1101 if (queue->tid == -1 || pt->have_sched_switch) {
1102 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1103 thread__zput(ptq->thread);
1106 if (!ptq->thread && ptq->tid != -1)
1107 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1110 ptq->pid = ptq->thread->pid_;
1111 if (queue->cpu == -1)
1112 ptq->cpu = ptq->thread->cpu;
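/*
 * Derive the perf sample flags (branch type, async/interrupt, TX abort and
 * in-TX, trace begin/end) for the current decoder state, and keep a copy of
 * the last instruction's bytes and length.
 */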
1116 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1118 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1119 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1120 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1121 if (ptq->state->to_ip)
1122 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1123 PERF_IP_FLAG_ASYNC |
1124 PERF_IP_FLAG_INTERRUPT;
1126 ptq->flags = PERF_IP_FLAG_BRANCH |
1127 PERF_IP_FLAG_TRACE_END;
1130 if (ptq->state->from_ip)
1131 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1133 ptq->flags = PERF_IP_FLAG_BRANCH |
1134 PERF_IP_FLAG_TRACE_BEGIN;
1135 if (ptq->state->flags & INTEL_PT_IN_TX)
1136 ptq->flags |= PERF_IP_FLAG_IN_TX;
1137 ptq->insn_len = ptq->state->insn_len;
1138 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1141 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1142 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1143 if (ptq->state->type & INTEL_PT_TRACE_END)
1144 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1147 static void intel_pt_setup_time_range(struct intel_pt *pt,
1148 struct intel_pt_queue *ptq)
1153 ptq->sel_timestamp = pt->time_ranges[0].start;
1156 if (ptq->sel_timestamp) {
1157 ptq->sel_start = true;
1159 ptq->sel_timestamp = pt->time_ranges[0].end;
1160 ptq->sel_start = false;
1164 static int intel_pt_setup_queue(struct intel_pt *pt,
1165 struct auxtrace_queue *queue,
1166 unsigned int queue_nr)
1168 struct intel_pt_queue *ptq = queue->priv;
1170 if (list_empty(&queue->head))
1174 ptq = intel_pt_alloc_queue(pt, queue_nr);
1179 if (queue->cpu != -1)
1180 ptq->cpu = queue->cpu;
1181 ptq->tid = queue->tid;
1183 ptq->cbr_seen = UINT_MAX;
1185 if (pt->sampling_mode && !pt->snapshot_mode &&
1186 pt->timeless_decoding)
1187 ptq->step_through_buffers = true;
1189 ptq->sync_switch = pt->sync_switch;
1191 intel_pt_setup_time_range(pt, ptq);
1194 if (!ptq->on_heap &&
1195 (!ptq->sync_switch ||
1196 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1197 const struct intel_pt_state *state;
1200 if (pt->timeless_decoding)
1203 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1204 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1205 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1207 if (ptq->sel_start && ptq->sel_timestamp) {
1208 ret = intel_pt_fast_forward(ptq->decoder,
1209 ptq->sel_timestamp);
1215 state = intel_pt_decode(ptq->decoder);
1217 if (state->err == INTEL_PT_ERR_NODATA) {
1218 intel_pt_log("queue %u has no timestamp\n",
1224 if (state->timestamp)
1228 ptq->timestamp = state->timestamp;
1229 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1230 queue_nr, ptq->timestamp);
1232 ptq->have_sample = true;
1233 if (ptq->sel_start && ptq->sel_timestamp &&
1234 ptq->timestamp < ptq->sel_timestamp)
1235 ptq->have_sample = false;
1236 intel_pt_sample_flags(ptq);
1237 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1240 ptq->on_heap = true;
1246 static int intel_pt_setup_queues(struct intel_pt *pt)
1251 for (i = 0; i < pt->queues.nr_queues; i++) {
1252 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1259 static inline bool intel_pt_skip_event(struct intel_pt *pt)
1261 return pt->synth_opts.initial_skip &&
1262 pt->num_events++ < pt->synth_opts.initial_skip;
1265 /*
1266 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1267 * Also ensure CBR is the first non-skipped event by allowing for 4 more samples
1268 * from this decoder state.
1269 */
1270 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1272 return pt->synth_opts.initial_skip &&
1273 pt->num_events + 4 < pt->synth_opts.initial_skip;
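/*
 * Helpers that fill in the sample fields common to all synthesized events:
 * the "a" variant sets pid/tid/cpu and the instruction bytes, the "b"
 * variant additionally sets time, ip, addr, cpumode and flags for
 * branch-based samples.
 */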
1276 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1277 union perf_event *event,
1278 struct perf_sample *sample)
1280 event->sample.header.type = PERF_RECORD_SAMPLE;
1281 event->sample.header.size = sizeof(struct perf_event_header);
1283 sample->pid = ptq->pid;
1284 sample->tid = ptq->tid;
1285 sample->cpu = ptq->cpu;
1286 sample->insn_len = ptq->insn_len;
1287 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1290 static void intel_pt_prep_b_sample(struct intel_pt *pt,
1291 struct intel_pt_queue *ptq,
1292 union perf_event *event,
1293 struct perf_sample *sample)
1295 intel_pt_prep_a_sample(ptq, event, sample);
1297 if (!pt->timeless_decoding)
1298 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1300 sample->ip = ptq->state->from_ip;
1301 sample->cpumode = intel_pt_cpumode(pt, sample->ip);
1302 sample->addr = ptq->state->to_ip;
1304 sample->flags = ptq->flags;
1306 event->sample.header.misc = sample->cpumode;
1309 static int intel_pt_inject_event(union perf_event *event,
1310 struct perf_sample *sample, u64 type)
1312 event->header.size = perf_event__sample_event_size(sample, type, 0);
1313 return perf_event__synthesize_sample(event, type, 0, sample);
1316 static inline int intel_pt_opt_inject(struct intel_pt *pt,
1317 union perf_event *event,
1318 struct perf_sample *sample, u64 type)
1320 if (!pt->synth_opts.inject)
1323 return intel_pt_inject_event(event, sample, type);
1326 static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1327 union perf_event *event,
1328 struct perf_sample *sample, u64 type)
1332 ret = intel_pt_opt_inject(pt, event, sample, type);
1336 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1338 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1343 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1345 struct intel_pt *pt = ptq->pt;
1346 union perf_event *event = ptq->event_buf;
1347 struct perf_sample sample = { .ip = 0, };
1348 struct dummy_branch_stack {
1351 struct branch_entry entries;
1354 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1357 if (intel_pt_skip_event(pt))
1360 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1362 sample.id = ptq->pt->branches_id;
1363 sample.stream_id = ptq->pt->branches_id;
1365 /*
1366 * perf report cannot handle events without a branch stack when using
1367 * SORT_MODE__BRANCH, so make a dummy one.
1368 */
1369 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1370 dummy_bs = (struct dummy_branch_stack){
1378 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1381 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1382 if (sample.cyc_cnt) {
1383 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1384 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1385 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1388 return intel_pt_deliver_synth_event(pt, event, &sample,
1389 pt->branches_sample_type);
1392 static void intel_pt_prep_sample(struct intel_pt *pt,
1393 struct intel_pt_queue *ptq,
1394 union perf_event *event,
1395 struct perf_sample *sample)
1397 intel_pt_prep_b_sample(pt, ptq, event, sample);
1399 if (pt->synth_opts.callchain) {
1400 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1401 pt->synth_opts.callchain_sz + 1,
1402 sample->ip, pt->kernel_start);
1403 sample->callchain = ptq->chain;
1406 if (pt->synth_opts.last_branch) {
1407 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1409 sample->branch_stack = ptq->last_branch;
1413 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1415 struct intel_pt *pt = ptq->pt;
1416 union perf_event *event = ptq->event_buf;
1417 struct perf_sample sample = { .ip = 0, };
1419 if (intel_pt_skip_event(pt))
1422 intel_pt_prep_sample(pt, ptq, event, &sample);
1424 sample.id = ptq->pt->instructions_id;
1425 sample.stream_id = ptq->pt->instructions_id;
1426 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1428 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1429 if (sample.cyc_cnt) {
1430 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1431 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1432 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1435 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1437 return intel_pt_deliver_synth_event(pt, event, &sample,
1438 pt->instructions_sample_type);
1441 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1443 struct intel_pt *pt = ptq->pt;
1444 union perf_event *event = ptq->event_buf;
1445 struct perf_sample sample = { .ip = 0, };
1447 if (intel_pt_skip_event(pt))
1450 intel_pt_prep_sample(pt, ptq, event, &sample);
1452 sample.id = ptq->pt->transactions_id;
1453 sample.stream_id = ptq->pt->transactions_id;
1455 return intel_pt_deliver_synth_event(pt, event, &sample,
1456 pt->transactions_sample_type);
1459 static void intel_pt_prep_p_sample(struct intel_pt *pt,
1460 struct intel_pt_queue *ptq,
1461 union perf_event *event,
1462 struct perf_sample *sample)
1464 intel_pt_prep_sample(pt, ptq, event, sample);
1466 /*
1467 * Zero IP is used to mean "trace start" but that is not the case for
1468 * power or PTWRITE events with no IP, so clear the flags.
1469 */
1470 sample->flags = 0;
1474 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1476 struct intel_pt *pt = ptq->pt;
1477 union perf_event *event = ptq->event_buf;
1478 struct perf_sample sample = { .ip = 0, };
1479 struct perf_synth_intel_ptwrite raw;
1481 if (intel_pt_skip_event(pt))
1484 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1486 sample.id = ptq->pt->ptwrites_id;
1487 sample.stream_id = ptq->pt->ptwrites_id;
1490 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1491 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1493 sample.raw_size = perf_synth__raw_size(raw);
1494 sample.raw_data = perf_synth__raw_data(&raw);
1496 return intel_pt_deliver_synth_event(pt, event, &sample,
1497 pt->ptwrites_sample_type);
1500 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1502 struct intel_pt *pt = ptq->pt;
1503 union perf_event *event = ptq->event_buf;
1504 struct perf_sample sample = { .ip = 0, };
1505 struct perf_synth_intel_cbr raw;
1508 if (intel_pt_skip_cbr_event(pt))
1511 ptq->cbr_seen = ptq->state->cbr;
1513 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1515 sample.id = ptq->pt->cbr_id;
1516 sample.stream_id = ptq->pt->cbr_id;
1518 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1519 raw.flags = cpu_to_le32(flags);
1520 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1523 sample.raw_size = perf_synth__raw_size(raw);
1524 sample.raw_data = perf_synth__raw_data(&raw);
1526 return intel_pt_deliver_synth_event(pt, event, &sample,
1527 pt->pwr_events_sample_type);
1530 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1532 struct intel_pt *pt = ptq->pt;
1533 union perf_event *event = ptq->event_buf;
1534 struct perf_sample sample = { .ip = 0, };
1535 struct perf_synth_intel_mwait raw;
1537 if (intel_pt_skip_event(pt))
1540 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1542 sample.id = ptq->pt->mwait_id;
1543 sample.stream_id = ptq->pt->mwait_id;
1546 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1548 sample.raw_size = perf_synth__raw_size(raw);
1549 sample.raw_data = perf_synth__raw_data(&raw);
1551 return intel_pt_deliver_synth_event(pt, event, &sample,
1552 pt->pwr_events_sample_type);
1555 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1557 struct intel_pt *pt = ptq->pt;
1558 union perf_event *event = ptq->event_buf;
1559 struct perf_sample sample = { .ip = 0, };
1560 struct perf_synth_intel_pwre raw;
1562 if (intel_pt_skip_event(pt))
1565 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1567 sample.id = ptq->pt->pwre_id;
1568 sample.stream_id = ptq->pt->pwre_id;
1571 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1573 sample.raw_size = perf_synth__raw_size(raw);
1574 sample.raw_data = perf_synth__raw_data(&raw);
1576 return intel_pt_deliver_synth_event(pt, event, &sample,
1577 pt->pwr_events_sample_type);
1580 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1582 struct intel_pt *pt = ptq->pt;
1583 union perf_event *event = ptq->event_buf;
1584 struct perf_sample sample = { .ip = 0, };
1585 struct perf_synth_intel_exstop raw;
1587 if (intel_pt_skip_event(pt))
1590 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1592 sample.id = ptq->pt->exstop_id;
1593 sample.stream_id = ptq->pt->exstop_id;
1596 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1598 sample.raw_size = perf_synth__raw_size(raw);
1599 sample.raw_data = perf_synth__raw_data(&raw);
1601 return intel_pt_deliver_synth_event(pt, event, &sample,
1602 pt->pwr_events_sample_type);
1605 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1607 struct intel_pt *pt = ptq->pt;
1608 union perf_event *event = ptq->event_buf;
1609 struct perf_sample sample = { .ip = 0, };
1610 struct perf_synth_intel_pwrx raw;
1612 if (intel_pt_skip_event(pt))
1615 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1617 sample.id = ptq->pt->pwrx_id;
1618 sample.stream_id = ptq->pt->pwrx_id;
1621 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1623 sample.raw_size = perf_synth__raw_size(raw);
1624 sample.raw_data = perf_synth__raw_data(&raw);
1626 return intel_pt_deliver_synth_event(pt, event, &sample,
1627 pt->pwr_events_sample_type);
1630 /*
1631 * PEBS gp_regs array indexes plus 1, so that 0 means not present. See
1632 * intel_pt_add_gp_regs().
1633 */
1634 static const int pebs_gp_regs[] = {
1635 [PERF_REG_X86_FLAGS] = 1,
1636 [PERF_REG_X86_IP] = 2,
1637 [PERF_REG_X86_AX] = 3,
1638 [PERF_REG_X86_CX] = 4,
1639 [PERF_REG_X86_DX] = 5,
1640 [PERF_REG_X86_BX] = 6,
1641 [PERF_REG_X86_SP] = 7,
1642 [PERF_REG_X86_BP] = 8,
1643 [PERF_REG_X86_SI] = 9,
1644 [PERF_REG_X86_DI] = 10,
1645 [PERF_REG_X86_R8] = 11,
1646 [PERF_REG_X86_R9] = 12,
1647 [PERF_REG_X86_R10] = 13,
1648 [PERF_REG_X86_R11] = 14,
1649 [PERF_REG_X86_R12] = 15,
1650 [PERF_REG_X86_R13] = 16,
1651 [PERF_REG_X86_R14] = 17,
1652 [PERF_REG_X86_R15] = 18,
1655 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1656 const struct intel_pt_blk_items *items,
1659 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1660 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1664 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1665 /* Get the PEBS gp_regs array index */
1666 int n = pebs_gp_regs[i] - 1;
1670 /*
1671 * Add only registers that were requested (i.e. 'regs_mask') and
1672 * that were provided (i.e. 'mask'), and update the resulting
1673 * mask (i.e. 'intr_regs->mask') accordingly.
1674 */
1675 if (mask & 1 << n && regs_mask & bit) {
1676 intr_regs->mask |= bit;
1677 *pos++ = gp_regs[n];
1684 #ifndef PERF_REG_X86_XMM0
1685 #define PERF_REG_X86_XMM0 32
1688 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1689 const struct intel_pt_blk_items *items,
1692 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1693 const u64 *xmm = items->xmm;
1695 /*
1696 * If there are any XMM registers, then there should be all of them.
1697 * Nevertheless, follow the logic to add only registers that were
1698 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1699 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1700 */
1701 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1703 for (; mask; mask >>= 1, xmm++) {
1709 #define LBR_INFO_MISPRED (1ULL << 63)
1710 #define LBR_INFO_IN_TX (1ULL << 62)
1711 #define LBR_INFO_ABORT (1ULL << 61)
1712 #define LBR_INFO_CYCLES 0xffff
1714 /* See the kernel's intel_pmu_store_pebs_lbrs() */
1715 static u64 intel_pt_lbr_flags(u64 info)
1718 struct branch_flags flags;
1723 u.flags.mispred = !!(info & LBR_INFO_MISPRED);
1724 u.flags.predicted = !(info & LBR_INFO_MISPRED);
1725 u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
1726 u.flags.abort = !!(info & LBR_INFO_ABORT);
1727 u.flags.cycles = info & LBR_INFO_CYCLES;
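/*
 * Each LBR appears in the block items as a from/to/info triple; copy only
 * complete triples into the branch stack, converting the info bits to perf
 * branch flags with intel_pt_lbr_flags().
 */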
1732 static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1733 const struct intel_pt_blk_items *items)
1740 to = &br_stack->entries[0].from;
1742 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1743 u32 mask = items->mask[i];
1744 const u64 *from = items->val[i];
1746 for (; mask; mask >>= 3, from += 3) {
1747 if ((mask & 7) == 7) {
1750 *to++ = intel_pt_lbr_flags(from[2]);
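/*
 * Synthesize a sample for the PEBS event from the Block Item (BIP) values
 * recorded when PEBS output is directed to Intel PT, filling only the
 * fields requested by the original event's sample_type.
 */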
1757 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
1759 const struct intel_pt_blk_items *items = &ptq->state->items;
1760 struct perf_sample sample = { .ip = 0, };
1761 union perf_event *event = ptq->event_buf;
1762 struct intel_pt *pt = ptq->pt;
1763 struct evsel *evsel = pt->pebs_evsel;
1764 u64 sample_type = evsel->core.attr.sample_type;
1765 u64 id = evsel->core.id[0];
1767 u64 regs[8 * sizeof(sample.intr_regs.mask)];
1769 if (intel_pt_skip_event(pt))
1772 intel_pt_prep_a_sample(ptq, event, &sample);
1775 sample.stream_id = id;
1777 if (!evsel->core.attr.freq)
1778 sample.period = evsel->core.attr.sample_period;
1780 /* No support for non-zero CS base */
1782 sample.ip = items->ip;
1783 else if (items->has_rip)
1784 sample.ip = items->rip;
1786 sample.ip = ptq->state->from_ip;
1788 /* No support for guest mode at this time */
1789 cpumode = sample.ip < ptq->pt->kernel_start ?
1790 PERF_RECORD_MISC_USER :
1791 PERF_RECORD_MISC_KERNEL;
1793 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
1795 sample.cpumode = cpumode;
1797 if (sample_type & PERF_SAMPLE_TIME) {
1800 if (items->has_timestamp)
1801 timestamp = items->timestamp;
1802 else if (!pt->timeless_decoding)
1803 timestamp = ptq->timestamp;
1805 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
1808 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
1809 pt->synth_opts.callchain) {
1810 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1811 pt->synth_opts.callchain_sz, sample.ip,
1813 sample.callchain = ptq->chain;
1816 if (sample_type & PERF_SAMPLE_REGS_INTR &&
1817 (items->mask[INTEL_PT_GP_REGS_POS] ||
1818 items->mask[INTEL_PT_XMM_POS])) {
1819 u64 regs_mask = evsel->core.attr.sample_regs_intr;
1822 sample.intr_regs.abi = items->is_32_bit ?
1823 PERF_SAMPLE_REGS_ABI_32 :
1824 PERF_SAMPLE_REGS_ABI_64;
1825 sample.intr_regs.regs = regs;
1827 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
1829 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
1832 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1833 if (items->mask[INTEL_PT_LBR_0_POS] ||
1834 items->mask[INTEL_PT_LBR_1_POS] ||
1835 items->mask[INTEL_PT_LBR_2_POS]) {
1836 intel_pt_add_lbrs(ptq->last_branch, items);
1837 } else if (pt->synth_opts.last_branch) {
1838 thread_stack__br_sample(ptq->thread, ptq->cpu,
1842 ptq->last_branch->nr = 0;
1844 sample.branch_stack = ptq->last_branch;
1847 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
1848 sample.addr = items->mem_access_address;
1850 if (sample_type & PERF_SAMPLE_WEIGHT) {
1851 /*
1852 * See the kernel's setup_pebs_adaptive_sample_data() and
1853 * intel_hsw_weight().
1854 */
1855 if (items->has_mem_access_latency)
1856 sample.weight = items->mem_access_latency;
1857 if (!sample.weight && items->has_tsx_aux_info) {
1858 /* Cycles last block */
1859 sample.weight = (u32)items->tsx_aux_info;
1863 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
1864 u64 ax = items->has_rax ? items->rax : 0;
1865 /* See the kernel's intel_hsw_transaction() */
1866 u64 txn = (u8)(items->tsx_aux_info >> 32);
1868 /* For RTM XABORTs also log the abort code from AX */
1869 if (txn & PERF_TXN_TRANSACTION && ax & 1)
1870 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1871 sample.transaction = txn;
1874 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
1877 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
1878 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
1880 union perf_event event;
1881 char msg[MAX_AUXTRACE_ERROR_MSG];
1884 if (pt->synth_opts.error_minus_flags) {
1885 if (code == INTEL_PT_ERR_OVR &&
1886 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
1888 if (code == INTEL_PT_ERR_LOST &&
1889 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
1893 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
1895 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
1896 code, cpu, pid, tid, ip, msg, timestamp);
1898 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
1900 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
1906 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
1907 const struct intel_pt_state *state)
1909 struct intel_pt *pt = ptq->pt;
1910 u64 tm = ptq->timestamp;
1912 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
1914 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
1915 ptq->tid, state->from_ip, tm);
1918 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
1920 struct auxtrace_queue *queue;
1921 pid_t tid = ptq->next_tid;
1927 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
1929 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
1931 queue = &pt->queues.queue_array[ptq->queue_nr];
1932 intel_pt_set_pid_tid_cpu(pt, queue);
1939 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
1941 struct intel_pt *pt = ptq->pt;
1943 return ip == pt->switch_ip &&
1944 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
1945 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
1946 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
1949 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
1950 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
1952 static int intel_pt_sample(struct intel_pt_queue *ptq)
1954 const struct intel_pt_state *state = ptq->state;
1955 struct intel_pt *pt = ptq->pt;
1958 if (!ptq->have_sample)
1961 ptq->have_sample = false;
1963 if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
1964 /*
1965 * Cycle count and instruction count only go together to create
1966 * a valid IPC ratio when the cycle count changes.
1967 */
1968 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
1969 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
1972 /*
1973 * Do PEBS first to allow for the possibility that the PEBS timestamp
1974 * precedes the current timestamp.
1975 */
1976 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
1977 err = intel_pt_synth_pebs_sample(ptq);
1982 if (pt->sample_pwr_events) {
1983 if (ptq->state->cbr != ptq->cbr_seen) {
1984 err = intel_pt_synth_cbr_sample(ptq);
1988 if (state->type & INTEL_PT_PWR_EVT) {
1989 if (state->type & INTEL_PT_MWAIT_OP) {
1990 err = intel_pt_synth_mwait_sample(ptq);
1994 if (state->type & INTEL_PT_PWR_ENTRY) {
1995 err = intel_pt_synth_pwre_sample(ptq);
1999 if (state->type & INTEL_PT_EX_STOP) {
2000 err = intel_pt_synth_exstop_sample(ptq);
2004 if (state->type & INTEL_PT_PWR_EXIT) {
2005 err = intel_pt_synth_pwrx_sample(ptq);
2012 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
2013 err = intel_pt_synth_instruction_sample(ptq);
2018 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2019 err = intel_pt_synth_transaction_sample(ptq);
2024 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2025 err = intel_pt_synth_ptwrite_sample(ptq);
2030 if (!(state->type & INTEL_PT_BRANCH))
2033 if (pt->use_thread_stack) {
2034 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2035 state->from_ip, state->to_ip, ptq->insn_len,
2036 state->trace_nr, pt->callstack,
2037 pt->br_stack_sz_plus,
2040 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2043 if (pt->sample_branches) {
2044 err = intel_pt_synth_branch_sample(ptq);
2049 if (!ptq->sync_switch)
2052 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2053 switch (ptq->switch_state) {
2054 case INTEL_PT_SS_NOT_TRACING:
2055 case INTEL_PT_SS_UNKNOWN:
2056 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2057 err = intel_pt_next_tid(pt, ptq);
2060 ptq->switch_state = INTEL_PT_SS_TRACING;
2063 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2066 } else if (!state->to_ip) {
2067 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2068 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2069 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2070 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2071 state->to_ip == pt->ptss_ip &&
2072 (ptq->flags & PERF_IP_FLAG_CALL)) {
2073 ptq->switch_state = INTEL_PT_SS_TRACING;
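/*
 * Find the kernel address of __switch_to, and of the sched_switch tracepoint
 * or __perf_event_task_sched_out, so that context switches can be recognized
 * directly from branches in the trace.
 */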
2079 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2081 struct machine *machine = pt->machine;
2083 struct symbol *sym, *start;
2084 u64 ip, switch_ip = 0;
2090 map = machine__kernel_map(machine);
2097 start = dso__first_symbol(map->dso);
2099 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2100 if (sym->binding == STB_GLOBAL &&
2101 !strcmp(sym->name, "__switch_to")) {
2102 ip = map->unmap_ip(map, sym->start);
2103 if (ip >= map->start && ip < map->end) {
2110 if (!switch_ip || !ptss_ip)
2113 if (pt->have_sched_switch == 1)
2114 ptss = "perf_trace_sched_switch";
2116 ptss = "__perf_event_task_sched_out";
2118 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2119 if (!strcmp(sym->name, ptss)) {
2120 ip = map->unmap_ip(map, sym->start);
2121 if (ip >= map->start && ip < map->end) {
2131 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2135 pt->sync_switch = true;
2137 for (i = 0; i < pt->queues.nr_queues; i++) {
2138 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2139 struct intel_pt_queue *ptq = queue->priv;
2142 ptq->sync_switch = true;
2146 /*
2147 * To filter against time ranges, it is only necessary to look at the next start
2148 * or end time.
2149 */
2150 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2152 struct intel_pt *pt = ptq->pt;
2154 if (ptq->sel_start) {
2155 /* Next time is an end time */
2156 ptq->sel_start = false;
2157 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2159 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2160 /* Next time is a start time */
2161 ptq->sel_start = true;
2163 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2171 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2176 if (ptq->sel_start) {
2177 if (ptq->timestamp >= ptq->sel_timestamp) {
2178 /* After start time, so consider next time */
2179 intel_pt_next_time(ptq);
2180 if (!ptq->sel_timestamp) {
2184 /* Check against end time */
2187 /* Before start time, so fast forward */
2188 ptq->have_sample = false;
2189 if (ptq->sel_timestamp > *ff_timestamp) {
2190 if (ptq->sync_switch) {
2191 intel_pt_next_tid(ptq->pt, ptq);
2192 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2194 *ff_timestamp = ptq->sel_timestamp;
2195 err = intel_pt_fast_forward(ptq->decoder,
2196 ptq->sel_timestamp);
2201 } else if (ptq->timestamp > ptq->sel_timestamp) {
2202 /* After end time, so consider next time */
2203 if (!intel_pt_next_time(ptq)) {
2204 /* No next time range, so stop decoding */
2205 ptq->have_sample = false;
2206 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2209 /* Check against next start time */
2212 /* Before end time */
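/*
 * Main decode loop for one queue: synthesize samples for the current decoder
 * state, decode to the next state, handle decoder errors, estimated
 * timestamps and time filtering, and return once the queue's timestamp
 * passes the requested timestamp so other queues can be processed in order.
 */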
2218 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2220 const struct intel_pt_state *state = ptq->state;
2221 struct intel_pt *pt = ptq->pt;
2222 u64 ff_timestamp = 0;
2225 if (!pt->kernel_start) {
2226 pt->kernel_start = machine__kernel_start(pt->machine);
2227 if (pt->per_cpu_mmaps &&
2228 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2229 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2230 !pt->sampling_mode) {
2231 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2232 if (pt->switch_ip) {
2233 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2234 pt->switch_ip, pt->ptss_ip);
2235 intel_pt_enable_sync_switch(pt);
2240 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2241 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2243 err = intel_pt_sample(ptq);
2247 state = intel_pt_decode(ptq->decoder);
2249 if (state->err == INTEL_PT_ERR_NODATA)
2251 if (ptq->sync_switch &&
2252 state->from_ip >= pt->kernel_start) {
2253 ptq->sync_switch = false;
2254 intel_pt_next_tid(pt, ptq);
2256 if (pt->synth_opts.errors) {
2257 err = intel_ptq_synth_error(ptq, state);
2265 ptq->have_sample = true;
2266 intel_pt_sample_flags(ptq);
2268 /* Use estimated TSC upon return to user space */
2270 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2271 state->to_ip && state->to_ip < pt->kernel_start) {
2272 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2273 state->timestamp, state->est_timestamp);
2274 ptq->timestamp = state->est_timestamp;
2275 /* Use estimated TSC in unknown switch state */
2276 } else if (ptq->sync_switch &&
2277 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2278 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2279 ptq->next_tid == -1) {
2280 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2281 state->timestamp, state->est_timestamp);
2282 ptq->timestamp = state->est_timestamp;
2283 } else if (state->timestamp > ptq->timestamp) {
2284 ptq->timestamp = state->timestamp;
2287 if (ptq->sel_timestamp) {
2288 err = intel_pt_time_filter(ptq, &ff_timestamp);
2293 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2294 *timestamp = ptq->timestamp;
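/*
 * Queues are kept on a heap ordered by the timestamp of their next output.
 * Each queue is decoded up to the given timestamp and then pushed back with
 * its new ordinal, keeping synthesized events in time order across queues.
 */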
2301 static inline int intel_pt_update_queues(struct intel_pt *pt)
2303 if (pt->queues.new_data) {
2304 pt->queues.new_data = false;
2305 return intel_pt_setup_queues(pt);
2310 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2312 unsigned int queue_nr;
2317 struct auxtrace_queue *queue;
2318 struct intel_pt_queue *ptq;
2320 if (!pt->heap.heap_cnt)
2323 if (pt->heap.heap_array[0].ordinal >= timestamp)
2326 queue_nr = pt->heap.heap_array[0].queue_nr;
2327 queue = &pt->queues.queue_array[queue_nr];
2330 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2331 queue_nr, pt->heap.heap_array[0].ordinal,
2334 auxtrace_heap__pop(&pt->heap);
2336 if (pt->heap.heap_cnt) {
2337 ts = pt->heap.heap_array[0].ordinal + 1;
2344 intel_pt_set_pid_tid_cpu(pt, queue);
2346 ret = intel_pt_run_decoder(ptq, &ts);
2349 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2354 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2358 ptq->on_heap = false;
2365 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2368 struct auxtrace_queues *queues = &pt->queues;
2372 for (i = 0; i < queues->nr_queues; i++) {
2373 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2374 struct intel_pt_queue *ptq = queue->priv;
2376 if (ptq && (tid == -1 || ptq->tid == tid)) {
2378 intel_pt_set_pid_tid_cpu(pt, queue);
2379 intel_pt_run_decoder(ptq, &ts);
2385 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2386 struct auxtrace_queue *queue,
2387 struct perf_sample *sample)
2389 struct machine *m = ptq->pt->machine;
2391 ptq->pid = sample->pid;
2392 ptq->tid = sample->tid;
2393 ptq->cpu = queue->cpu;
2395 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2396 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2398 thread__zput(ptq->thread);
2403 if (ptq->pid == -1) {
2404 ptq->thread = machine__find_thread(m, -1, ptq->tid);
2406 ptq->pid = ptq->thread->pid_;
2410 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
2413 static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2414 struct perf_sample *sample)
2416 struct auxtrace_queue *queue;
2417 struct intel_pt_queue *ptq;
2420 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2429 ptq->time = sample->time;
2430 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2431 intel_pt_run_decoder(ptq, &ts);
2435 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2437 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2438 sample->pid, sample->tid, 0, sample->time);
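/*
 * Find the queue for a CPU: try the queue with the same number first (the
 * common case for per-cpu queues), then search the remaining queues.
 */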
2441 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2445 if (cpu < 0 || !pt->queues.nr_queues)
2448 if ((unsigned)cpu >= pt->queues.nr_queues)
2449 i = pt->queues.nr_queues - 1;
2453 if (pt->queues.queue_array[i].cpu == cpu)
2454 return pt->queues.queue_array[i].priv;
2456 for (j = 0; i > 0; j++) {
2457 if (pt->queues.queue_array[--i].cpu == cpu)
2458 return pt->queues.queue_array[i].priv;
2461 for (; j < pt->queues.nr_queues; j++) {
2462 if (pt->queues.queue_array[j].cpu == cpu)
2463 return pt->queues.queue_array[j].priv;
2469 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2472 struct intel_pt_queue *ptq;
2475 if (!pt->sync_switch)
2478 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2479 if (!ptq || !ptq->sync_switch)
2482 switch (ptq->switch_state) {
2483 case INTEL_PT_SS_NOT_TRACING:
2485 case INTEL_PT_SS_UNKNOWN:
2486 case INTEL_PT_SS_TRACING:
2487 ptq->next_tid = tid;
2488 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2490 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2491 if (!ptq->on_heap) {
2492 ptq->timestamp = perf_time_to_tsc(timestamp,
2494 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2498 ptq->on_heap = true;
2500 ptq->switch_state = INTEL_PT_SS_TRACING;
2502 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2503 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2514 static int intel_pt_process_switch(struct intel_pt *pt,
2515 struct perf_sample *sample)
2517 struct evsel *evsel;
2521 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
2522 if (evsel != pt->switch_evsel)
2525 tid = evsel__intval(evsel, sample, "next_pid");
2528 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2529 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2532 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2536 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2539 static int intel_pt_context_switch_in(struct intel_pt *pt,
2540 struct perf_sample *sample)
2542 pid_t pid = sample->pid;
2543 pid_t tid = sample->tid;
2544 int cpu = sample->cpu;
2546 if (pt->sync_switch) {
2547 struct intel_pt_queue *ptq;
2549 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2550 if (ptq && ptq->sync_switch) {
2552 switch (ptq->switch_state) {
2553 case INTEL_PT_SS_NOT_TRACING:
2554 case INTEL_PT_SS_UNKNOWN:
2555 case INTEL_PT_SS_TRACING:
2557 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2558 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2559 ptq->switch_state = INTEL_PT_SS_TRACING;
2567 /*
2568 * If the current tid has not been updated yet, ensure it is now that
2569 * a "switch in" event has occurred.
2570 */
2571 if (machine__get_current_tid(pt->machine, cpu) == tid)
2574 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2577 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2578 struct perf_sample *sample)
2580 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2586 if (pt->have_sched_switch == 3) {
2588 return intel_pt_context_switch_in(pt, sample);
2589 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2590 pr_err("Expecting CPU-wide context switch event\n");
2593 pid = event->context_switch.next_prev_pid;
2594 tid = event->context_switch.next_prev_tid;
2603 pr_err("context_switch event has no tid\n");
2607 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2611 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2614 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2615 union perf_event *event,
2616 struct perf_sample *sample)
2618 if (!pt->per_cpu_mmaps)
2621 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2622 sample->cpu, event->itrace_start.pid,
2623 event->itrace_start.tid, sample->time,
2624 perf_time_to_tsc(sample->time, &pt->tc));
2626 return machine__set_current_tid(pt->machine, sample->cpu,
2627 event->itrace_start.pid,
2628 event->itrace_start.tid);
2631 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
2632 struct addr_location *al)
2634 if (!al->map || addr < al->map->start || addr >= al->map->end) {
2635 if (!thread__find_map(thread, cpumode, addr, al))
2642 /* Invalidate all instruction cache entries that overlap the text poke */
2643 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
2645 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2646 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
2647 /* Assume the text poke begins in a basic block of no more than 4096 bytes */
2648 int cnt = 4096 + event->text_poke.new_len;
2649 struct thread *thread = pt->unknown_thread;
2650 struct addr_location al = { .map = NULL };
2651 struct machine *machine = pt->machine;
2652 struct intel_pt_cache_entry *e;
2655 if (!event->text_poke.new_len)
2658 for (; cnt; cnt--, addr--) {
2659 if (intel_pt_find_map(thread, cpumode, addr, &al)) {
2660 if (addr < event->text_poke.addr)
2665 if (!al.map->dso || !al.map->dso->auxtrace_cache)
2668 offset = al.map->map_ip(al.map, addr);
2670 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
2674 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
2675 /*
2676 * No overlap. Working backwards, there cannot be another
2677 * basic block that overlaps the text poke if there is a
2678 * branch instruction before the text poke address.
2679 */
2680 if (e->branch != INTEL_PT_BR_NO_BRANCH)
2683 intel_pt_cache_invalidate(al.map->dso, machine, offset);
2684 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
2685 al.map->dso->long_name, addr);
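/*
 * Main event callback. Ordered events are required so that event times seen
 * here advance monotonically. Each event's time is converted to TSC and used
 * to decode the queued trace data up to that point (or the timeless paths are
 * used when there are no timestamps). Synthesized callchains and branch
 * stacks are attached to regular samples on request, and sched_switch
 * samples, context switch, itrace-start, truncated-AUX (lost trace) and
 * text-poke events are handled so that the decoder's view of pid/tid and of
 * the traced code stays correct.
 */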
2692 static int intel_pt_process_event(struct perf_session *session,
2693 union perf_event *event,
2694 struct perf_sample *sample,
2695 struct perf_tool *tool)
2697 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2705 if (!tool->ordered_events) {
2706 pr_err("Intel Processor Trace requires ordered events\n");
2710 if (sample->time && sample->time != (u64)-1)
2711 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2715 if (timestamp || pt->timeless_decoding) {
2716 err = intel_pt_update_queues(pt);
2721 if (pt->timeless_decoding) {
2722 if (pt->sampling_mode) {
2723 if (sample->aux_sample.size)
2724 err = intel_pt_process_timeless_sample(pt,
2726 } else if (event->header.type == PERF_RECORD_EXIT) {
2727 err = intel_pt_process_timeless_queues(pt,
2731 } else if (timestamp) {
2732 err = intel_pt_process_queues(pt, timestamp);
2737 if (event->header.type == PERF_RECORD_SAMPLE) {
2738 if (pt->synth_opts.add_callchain && !sample->callchain)
2739 intel_pt_add_callchain(pt, sample);
2740 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
2741 intel_pt_add_br_stack(pt, sample);
2744 if (event->header.type == PERF_RECORD_AUX &&
2745 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
2746 pt->synth_opts.errors) {
2747 err = intel_pt_lost(pt, sample);
2752 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
2753 err = intel_pt_process_switch(pt, sample);
2754 else if (event->header.type == PERF_RECORD_ITRACE_START)
2755 err = intel_pt_process_itrace_start(pt, event, sample);
2756 else if (event->header.type == PERF_RECORD_SWITCH ||
2757 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2758 err = intel_pt_context_switch(pt, event, sample);
2760 if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
2761 err = intel_pt_text_poke(pt, event);
2763 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
2764 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2765 event->header.type, sample->cpu, sample->time, timestamp);
2766 intel_pt_log_event(event);
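/*
 * Flush remaining trace data at the end of the session: process any queues
 * that have not yet been decoded, either timelessly or up to MAX_TIMESTAMP.
 */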
2772 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
2774 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2781 if (!tool->ordered_events)
2784 ret = intel_pt_update_queues(pt);
2788 if (pt->timeless_decoding)
2789 return intel_pt_process_timeless_queues(pt, -1,
2792 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
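/*
 * Teardown: intel_pt_free_events() releases the per-queue decoder state and
 * the auxtrace queues; intel_pt_free() additionally drops the heap, the
 * unknown thread, the address filters and the time ranges, and detaches the
 * auxtrace handler from the session.
 */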
2795 static void intel_pt_free_events(struct perf_session *session)
2797 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2799 struct auxtrace_queues *queues = &pt->queues;
2802 for (i = 0; i < queues->nr_queues; i++) {
2803 intel_pt_free_queue(queues->queue_array[i].priv);
2804 queues->queue_array[i].priv = NULL;
2806 intel_pt_log_disable();
2807 auxtrace_queues__free(queues);
2810 static void intel_pt_free(struct perf_session *session)
2812 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2815 auxtrace_heap__free(&pt->heap);
2816 intel_pt_free_events(session);
2817 session->auxtrace = NULL;
2818 thread__put(pt->unknown_thread);
2819 addr_filters__exit(&pt->filts);
2822 zfree(&pt->time_ranges);
2826 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
2827 struct evsel *evsel)
2829 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2832 return evsel->core.attr.type == pt->pmu_type;
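/*
 * Queue PERF_RECORD_AUXTRACE data as it is encountered, when it was not
 * already queued from the auxtrace index. For a pipe the trace data has to
 * be copied out of the pipe, whereas for a regular file only the file offset
 * needs to be recorded for later reading. When dumping the trace, the buffer
 * is also fetched and dumped here.
 */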
2835 static int intel_pt_process_auxtrace_event(struct perf_session *session,
2836 union perf_event *event,
2837 struct perf_tool *tool __maybe_unused)
2839 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2842 if (!pt->data_queued) {
2843 struct auxtrace_buffer *buffer;
2845 int fd = perf_data__fd(session->data);
2848 if (perf_data__is_pipe(session->data)) {
2851 data_offset = lseek(fd, 0, SEEK_CUR);
2852 if (data_offset == -1)
2856 err = auxtrace_queues__add_event(&pt->queues, session, event,
2857 data_offset, &buffer);
2861 /* Dump here now that we have copied a piped trace out of the pipe */
2863 if (auxtrace_buffer__get_data(buffer, fd)) {
2864 intel_pt_dump_event(pt, buffer->data,
2866 auxtrace_buffer__put_data(buffer);
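/*
 * Used when trace data is queued up-front (sampling mode, or no auxtrace
 * index): AUXTRACE events are queued by file offset, while AUX sample data
 * is queued against its sample time converted to TSC.
 */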
2874 static int intel_pt_queue_data(struct perf_session *session,
2875 struct perf_sample *sample,
2876 union perf_event *event, u64 data_offset)
2878 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2883 return auxtrace_queues__add_event(&pt->queues, session, event,
2887 if (sample->time && sample->time != (u64)-1)
2888 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2892 return auxtrace_queues__add_sample(&pt->queues, session, sample,
2893 data_offset, timestamp);
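/*
 * Synthesized event types are injected into the session by wrapping a dummy
 * perf_tool: perf_event__synthesize_attr() builds the attribute event and
 * hands it to intel_pt_event_synth(), which recovers the session from the
 * wrapper and delivers the event to it.
 */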
2896 struct intel_pt_synth {
2897 struct perf_tool dummy_tool;
2898 struct perf_session *session;
2901 static int intel_pt_event_synth(struct perf_tool *tool,
2902 union perf_event *event,
2903 struct perf_sample *sample __maybe_unused,
2904 struct machine *machine __maybe_unused)
2906 struct intel_pt_synth *intel_pt_synth =
2907 container_of(tool, struct intel_pt_synth, dummy_tool);
2909 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
2913 static int intel_pt_synth_event(struct perf_session *session, const char *name,
2914 struct perf_event_attr *attr, u64 id)
2916 struct intel_pt_synth intel_pt_synth;
2919 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
2920 name, id, (u64)attr->sample_type);
2922 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
2923 intel_pt_synth.session = session;
2925 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
2926 &id, intel_pt_event_synth);
2928 pr_err("%s: failed to synthesize '%s' event type\n",
2934 static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
2937 struct evsel *evsel;
2939 evlist__for_each_entry(evlist, evsel) {
2940 if (evsel->core.id && evsel->core.id[0] == id) {
2942 zfree(&evsel->name);
2943 evsel->name = strdup(name);
2949 static struct evsel *intel_pt_evsel(struct intel_pt *pt,
2950 struct evlist *evlist)
2952 struct evsel *evsel;
2954 evlist__for_each_entry(evlist, evsel) {
2955 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
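/*
 * Create the synthesized event types requested by the itrace options. The
 * attribute is modelled on the Intel PT evsel (sample_type, exclude_* bits,
 * read_format), ids are allocated starting at the evsel's first id plus
 * 1000000000, and each synthesized type ("branches", "instructions",
 * "transactions", "ptwrite" and the power events) gets its own id and
 * sample_type recorded in struct intel_pt for use when synthesizing samples.
 */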
2962 static int intel_pt_synth_events(struct intel_pt *pt,
2963 struct perf_session *session)
2965 struct evlist *evlist = session->evlist;
2966 struct evsel *evsel = intel_pt_evsel(pt, evlist);
2967 struct perf_event_attr attr;
2972 pr_debug("There are no selected events with Intel Processor Trace data\n");
2976 memset(&attr, 0, sizeof(struct perf_event_attr));
2977 attr.size = sizeof(struct perf_event_attr);
2978 attr.type = PERF_TYPE_HARDWARE;
2979 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
2980 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
2982 if (pt->timeless_decoding)
2983 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
2985 attr.sample_type |= PERF_SAMPLE_TIME;
2986 if (!pt->per_cpu_mmaps)
2987 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
2988 attr.exclude_user = evsel->core.attr.exclude_user;
2989 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
2990 attr.exclude_hv = evsel->core.attr.exclude_hv;
2991 attr.exclude_host = evsel->core.attr.exclude_host;
2992 attr.exclude_guest = evsel->core.attr.exclude_guest;
2993 attr.sample_id_all = evsel->core.attr.sample_id_all;
2994 attr.read_format = evsel->core.attr.read_format;
2996 id = evsel->core.id[0] + 1000000000;
3000 if (pt->synth_opts.branches) {
3001 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3002 attr.sample_period = 1;
3003 attr.sample_type |= PERF_SAMPLE_ADDR;
3004 err = intel_pt_synth_event(session, "branches", &attr, id);
3007 pt->sample_branches = true;
3008 pt->branches_sample_type = attr.sample_type;
3009 pt->branches_id = id;
3011 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3014 if (pt->synth_opts.callchain)
3015 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3016 if (pt->synth_opts.last_branch)
3017 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3019 if (pt->synth_opts.instructions) {
3020 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3021 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3022 attr.sample_period =
3023 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3025 attr.sample_period = pt->synth_opts.period;
3026 err = intel_pt_synth_event(session, "instructions", &attr, id);
3029 pt->sample_instructions = true;
3030 pt->instructions_sample_type = attr.sample_type;
3031 pt->instructions_id = id;
3035 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3036 attr.sample_period = 1;
3038 if (pt->synth_opts.transactions) {
3039 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3040 err = intel_pt_synth_event(session, "transactions", &attr, id);
3043 pt->sample_transactions = true;
3044 pt->transactions_sample_type = attr.sample_type;
3045 pt->transactions_id = id;
3046 intel_pt_set_event_name(evlist, id, "transactions");
3050 attr.type = PERF_TYPE_SYNTH;
3051 attr.sample_type |= PERF_SAMPLE_RAW;
3053 if (pt->synth_opts.ptwrites) {
3054 attr.config = PERF_SYNTH_INTEL_PTWRITE;
3055 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3058 pt->sample_ptwrites = true;
3059 pt->ptwrites_sample_type = attr.sample_type;
3060 pt->ptwrites_id = id;
3061 intel_pt_set_event_name(evlist, id, "ptwrite");
3065 if (pt->synth_opts.pwr_events) {
3066 pt->sample_pwr_events = true;
3067 pt->pwr_events_sample_type = attr.sample_type;
3069 attr.config = PERF_SYNTH_INTEL_CBR;
3070 err = intel_pt_synth_event(session, "cbr", &attr, id);
3074 intel_pt_set_event_name(evlist, id, "cbr");
3078 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
3079 attr.config = PERF_SYNTH_INTEL_MWAIT;
3080 err = intel_pt_synth_event(session, "mwait", &attr, id);
3084 intel_pt_set_event_name(evlist, id, "mwait");
3087 attr.config = PERF_SYNTH_INTEL_PWRE;
3088 err = intel_pt_synth_event(session, "pwre", &attr, id);
3092 intel_pt_set_event_name(evlist, id, "pwre");
3095 attr.config = PERF_SYNTH_INTEL_EXSTOP;
3096 err = intel_pt_synth_event(session, "exstop", &attr, id);
3100 intel_pt_set_event_name(evlist, id, "exstop");
3103 attr.config = PERF_SYNTH_INTEL_PWRX;
3104 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3108 intel_pt_set_event_name(evlist, id, "pwrx");
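/*
 * If synthesizing other events is enabled, remember the evsel that was
 * recorded with aux_output (PEBS data output via Intel PT) so that its PEBS
 * records can be synthesized from the trace.
 */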
3115 static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3117 struct evsel *evsel;
3119 if (!pt->synth_opts.other_events)
3122 evlist__for_each_entry(pt->session->evlist, evsel) {
3123 if (evsel->core.attr.aux_output && evsel->core.id) {
3124 pt->sample_pebs = true;
3125 pt->pebs_evsel = evsel;
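/*
 * Locate the means of following context switches: either the
 * sched:sched_switch tracepoint (have_sched_switch == 1) or an evsel
 * recorded with the context_switch attribute bit (have_sched_switch == 2).
 */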
3131 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3133 struct evsel *evsel;
3135 evlist__for_each_entry_reverse(evlist, evsel) {
3136 const char *name = evsel__name(evsel);
3138 if (!strcmp(name, "sched:sched_switch"))
3145 static bool intel_pt_find_switch(struct evlist *evlist)
3147 struct evsel *evsel;
3149 evlist__for_each_entry(evlist, evsel) {
3150 if (evsel->core.attr.context_switch)
3157 static int intel_pt_perf_config(const char *var, const char *value, void *data)
3159 struct intel_pt *pt = data;
3161 if (!strcmp(var, "intel-pt.mispred-all"))
3162 pt->mispred_all = perf_config_bool(var, value);
3167 /* Find least TSC which converts to ns or later */
3168 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3172 tsc = perf_time_to_tsc(ns, &pt->tc);
3175 tm = tsc_to_perf_time(tsc, &pt->tc);
3182 tm = tsc_to_perf_time(++tsc, &pt->tc);
3187 /* Find greatest TSC which converts to ns or earlier */
3188 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3192 tsc = perf_time_to_tsc(ns, &pt->tc);
3195 tm = tsc_to_perf_time(tsc, &pt->tc);
3202 tm = tsc_to_perf_time(--tsc, &pt->tc);
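/*
 * Convert the requested time ranges (typically from perf's --time option)
 * from perf time to TSC. Because the perf time <-> TSC conversion is not
 * exact, intel_pt_tsc_start() and intel_pt_tsc_end() search for the nearest
 * TSC value whose conversion back to perf time still falls on the right side
 * of the boundary, so the TSC range matches the requested perf-time range.
 * A zero start or end is left as zero, i.e. an open-ended range.
 */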
3207 static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3208 struct itrace_synth_opts *opts)
3210 struct perf_time_interval *p = opts->ptime_range;
3211 int n = opts->range_num;
3214 if (!n || !p || pt->timeless_decoding)
3217 pt->time_ranges = calloc(n, sizeof(struct range));
3218 if (!pt->time_ranges)
3223 intel_pt_log("%s: %u range(s)\n", __func__, n);
3225 for (i = 0; i < n; i++) {
3226 struct range *r = &pt->time_ranges[i];
3227 u64 ts = p[i].start;
3231 * Take care to ensure the TSC range matches the perf-time range
3232 * when converted back to perf-time.
3234 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3235 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3237 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3239 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3240 i, r->start, r->end);
3246 static const char * const intel_pt_info_fmts[] = {
3247 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3248 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3249 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3250 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3251 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3252 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3253 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3254 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3255 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3256 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3257 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3258 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3259 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3260 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3261 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3262 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3265 static void intel_pt_print_info(__u64 *arr, int start, int finish)
3272 for (i = start; i <= finish; i++)
3273 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3276 static void intel_pt_print_info_str(const char *name, const char *str)
3281 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
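/* Check the auxtrace_info event is big enough to contain priv[pos] */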
3284 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3286 return auxtrace_info->header.size >=
3287 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
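/*
 * Entry point for PERF_RECORD_AUXTRACE_INFO: validate and read the recorded
 * parameters (PMU type, TSC conversion, trace feature bits, optional filter
 * string), allocate struct intel_pt, register the auxtrace callbacks on the
 * session, resolve how context switches will be followed, apply the itrace
 * synthesis options, create the synthesized event types, and finally queue
 * the trace data (from the auxtrace index when available).
 */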
3290 int intel_pt_process_auxtrace_info(union perf_event *event,
3291 struct perf_session *session)
3293 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3294 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3295 struct intel_pt *pt;
3300 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3304 pt = zalloc(sizeof(struct intel_pt));
3308 addr_filters__init(&pt->filts);
3310 err = perf_config(intel_pt_perf_config, pt);
3314 err = auxtrace_queues__init(&pt->queues);
3318 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3320 pt->session = session;
3321 pt->machine = &session->machines.host; /* No kvm support */
3322 pt->auxtrace_type = auxtrace_info->type;
3323 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3324 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3325 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3326 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3327 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3328 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3329 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3330 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3331 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3332 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3333 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3334 INTEL_PT_PER_CPU_MMAPS);
3336 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3337 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3338 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3339 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3340 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3341 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3342 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3346 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3347 pt->max_non_turbo_ratio =
3348 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3349 intel_pt_print_info(&auxtrace_info->priv[0],
3350 INTEL_PT_MAX_NONTURBO_RATIO,
3351 INTEL_PT_MAX_NONTURBO_RATIO);
3354 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3355 info_end = (void *)info + auxtrace_info->header.size;
3357 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3360 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3361 intel_pt_print_info(&auxtrace_info->priv[0],
3362 INTEL_PT_FILTER_STR_LEN,
3363 INTEL_PT_FILTER_STR_LEN);
3365 const char *filter = (const char *)info;
3367 len = roundup(len + 1, 8);
3369 if ((void *)info > info_end) {
3370 pr_err("%s: bad filter string length\n", __func__);
3372 goto err_free_queues;
3374 pt->filter = memdup(filter, len);
3377 goto err_free_queues;
3379 if (session->header.needs_swap)
3380 mem_bswap_64(pt->filter, len);
3381 if (pt->filter[len - 1]) {
3382 pr_err("%s: filter string not null terminated\n", __func__);
3384 goto err_free_queues;
3386 err = addr_filters__parse_bare_filter(&pt->filts,
3389 goto err_free_queues;
3391 intel_pt_print_info_str("Filter string", pt->filter);
3394 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3395 if (pt->timeless_decoding && !pt->tc.time_mult)
3396 pt->tc.time_mult = 1;
3397 pt->have_tsc = intel_pt_have_tsc(pt);
3398 pt->sampling_mode = intel_pt_sampling_mode(pt);
3399 pt->est_tsc = !pt->timeless_decoding;
3401 pt->unknown_thread = thread__new(999999999, 999999999);
3402 if (!pt->unknown_thread) {
3404 goto err_free_queues;
3408 * Since this thread will not be kept in any rbtree nor in a
3409 * list, initialize its list node so that at thread__put() the
3410 * current thread lifetime assumption is kept and we don't segfault
3411 * at list_del_init().
3413 INIT_LIST_HEAD(&pt->unknown_thread->node);
3415 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3417 goto err_delete_thread;
3418 if (thread__init_maps(pt->unknown_thread, pt->machine)) {
3420 goto err_delete_thread;
3423 pt->auxtrace.process_event = intel_pt_process_event;
3424 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3425 pt->auxtrace.queue_data = intel_pt_queue_data;
3426 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
3427 pt->auxtrace.flush_events = intel_pt_flush;
3428 pt->auxtrace.free_events = intel_pt_free_events;
3429 pt->auxtrace.free = intel_pt_free;
3430 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
3431 session->auxtrace = &pt->auxtrace;
3436 if (pt->have_sched_switch == 1) {
3437 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3438 if (!pt->switch_evsel) {
3439 pr_err("%s: missing sched_switch event\n", __func__);
3441 goto err_delete_thread;
3443 } else if (pt->have_sched_switch == 2 &&
3444 !intel_pt_find_switch(session->evlist)) {
3445 pr_err("%s: missing context_switch attribute flag\n", __func__);
3447 goto err_delete_thread;
3450 if (session->itrace_synth_opts->set) {
3451 pt->synth_opts = *session->itrace_synth_opts;
3453 itrace_synth_opts__set_default(&pt->synth_opts,
3454 session->itrace_synth_opts->default_no_sample);
3455 if (!session->itrace_synth_opts->default_no_sample &&
3456 !session->itrace_synth_opts->inject) {
3457 pt->synth_opts.branches = false;
3458 pt->synth_opts.callchain = true;
3459 pt->synth_opts.add_callchain = true;
3461 pt->synth_opts.thread_stack =
3462 session->itrace_synth_opts->thread_stack;
3465 if (pt->synth_opts.log)
3466 intel_pt_log_enable();
3468 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
3469 if (pt->tc.time_mult) {
3470 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3472 if (!pt->max_non_turbo_ratio)
3473 pt->max_non_turbo_ratio =
3474 (tsc_freq + 50000000) / 100000000;
3475 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3476 intel_pt_log("Maximum non-turbo ratio %u\n",
3477 pt->max_non_turbo_ratio);
3478 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
3481 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3483 goto err_delete_thread;
3485 if (pt->synth_opts.calls)
3486 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3487 PERF_IP_FLAG_TRACE_END;
3488 if (pt->synth_opts.returns)
3489 pt->branches_filter |= PERF_IP_FLAG_RETURN |
3490 PERF_IP_FLAG_TRACE_BEGIN;
3492 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
3493 !symbol_conf.use_callchain) {
3494 symbol_conf.use_callchain = true;
3495 if (callchain_register_param(&callchain_param) < 0) {
3496 symbol_conf.use_callchain = false;
3497 pt->synth_opts.callchain = false;
3498 pt->synth_opts.add_callchain = false;
3502 if (pt->synth_opts.add_callchain) {
3503 err = intel_pt_callchain_init(pt);
3505 goto err_delete_thread;
3508 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
3509 pt->br_stack_sz = pt->synth_opts.last_branch_sz;
3510 pt->br_stack_sz_plus = pt->br_stack_sz;
3513 if (pt->synth_opts.add_last_branch) {
3514 err = intel_pt_br_stack_init(pt);
3516 goto err_delete_thread;
3518 * Additional branch stack size to cater for tracing from the
3519 * actual sample ip to where the sample time is recorded.
3520 * Measured at about 200 branches, but generously set to 1024.
3521 * If kernel space is not being traced, then add just 1 for the
3522 * branch to kernel space.
3524 if (intel_pt_tracing_kernel(pt))
3525 pt->br_stack_sz_plus += 1024;
3527 pt->br_stack_sz_plus += 1;
3530 pt->use_thread_stack = pt->synth_opts.callchain ||
3531 pt->synth_opts.add_callchain ||
3532 pt->synth_opts.thread_stack ||
3533 pt->synth_opts.last_branch ||
3534 pt->synth_opts.add_last_branch;
3536 pt->callstack = pt->synth_opts.callchain ||
3537 pt->synth_opts.add_callchain ||
3538 pt->synth_opts.thread_stack;
3540 err = intel_pt_synth_events(pt, session);
3542 goto err_delete_thread;
3544 intel_pt_setup_pebs_events(pt);
3546 if (pt->sampling_mode || list_empty(&session->auxtrace_index))
3547 err = auxtrace_queue_data(session, true, true);
3549 err = auxtrace_queues__process_index(&pt->queues, session);
3551 goto err_delete_thread;
3553 if (pt->queues.populated)
3554 pt->data_queued = true;
3556 if (pt->timeless_decoding)
3557 pr_debug2("Intel PT decoding without timestamps\n");
3563 thread__zput(pt->unknown_thread);
3565 intel_pt_log_disable();
3566 auxtrace_queues__free(&pt->queues);
3567 session->auxtrace = NULL;
3569 addr_filters__exit(&pt->filts);
3571 zfree(&pt->time_ranges);