1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_pt.c: Intel Processor Trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/zalloc.h>
27 #include "thread-stack.h"
29 #include "callchain.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "time-utils.h"
40 #include "../arch/x86/include/uapi/asm/perf_regs.h"
42 #include "intel-pt-decoder/intel-pt-log.h"
43 #include "intel-pt-decoder/intel-pt-decoder.h"
44 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
45 #include "intel-pt-decoder/intel-pt-pkt-decoder.h"
47 #define MAX_TIMESTAMP (~0ULL)
55 struct auxtrace auxtrace;
56 struct auxtrace_queues queues;
57 struct auxtrace_heap heap;
59 struct perf_session *session;
60 struct machine *machine;
61 struct evsel *switch_evsel;
62 struct thread *unknown_thread;
63 bool timeless_decoding;
72 bool use_thread_stack;
74 unsigned int br_stack_sz;
75 unsigned int br_stack_sz_plus;
76 int have_sched_switch;
83 struct perf_tsc_conversion tc;
84 bool cap_user_time_zero;
86 struct itrace_synth_opts synth_opts;
88 bool sample_instructions;
89 u64 instructions_sample_type;
94 u64 branches_sample_type;
97 bool sample_transactions;
98 u64 transactions_sample_type;
101 bool sample_ptwrites;
102 u64 ptwrites_sample_type;
105 bool sample_pwr_events;
106 u64 pwr_events_sample_type;
115 struct evsel *pebs_evsel;
124 unsigned max_non_turbo_ratio;
127 unsigned long num_events;
130 struct addr_filters filts;
132 struct range *time_ranges;
133 unsigned int range_cnt;
135 struct ip_callchain *chain;
136 struct branch_stack *br_stack;
139 struct rb_root vmcs_info;
143 INTEL_PT_SS_NOT_TRACING,
146 INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
147 INTEL_PT_SS_EXPECTING_SWITCH_IP,
150 struct intel_pt_queue {
152 unsigned int queue_nr;
153 struct auxtrace_buffer *buffer;
154 struct auxtrace_buffer *old_buffer;
156 const struct intel_pt_state *state;
157 struct ip_callchain *chain;
158 struct branch_stack *last_branch;
159 union perf_event *event_buf;
162 bool step_through_buffers;
163 bool use_buffer_pid_tid;
169 struct thread *thread;
170 struct machine *guest_machine;
171 struct thread *unknown_guest_thread;
172 pid_t guest_machine_pid;
179 unsigned int sel_idx;
185 u64 last_in_insn_cnt;
187 u64 last_br_insn_cnt;
189 unsigned int cbr_seen;
190 char insn[INTEL_PT_INSN_BUF_SZ];
193 static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
194 unsigned char *buf, size_t len)
196 struct intel_pt_pkt packet;
199 char desc[INTEL_PT_PKT_DESC_MAX];
200 const char *color = PERF_COLOR_BLUE;
201 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
203 color_fprintf(stdout, color,
204 ". ... Intel Processor Trace data: size %zu bytes\n",
208 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
214 color_fprintf(stdout, color, " %08x: ", pos);
215 for (i = 0; i < pkt_len; i++)
216 color_fprintf(stdout, color, " %02x", buf[i]);
218 color_fprintf(stdout, color, " ");
220 ret = intel_pt_pkt_desc(&packet, desc,
221 INTEL_PT_PKT_DESC_MAX);
223 color_fprintf(stdout, color, " %s\n", desc);
225 color_fprintf(stdout, color, " Bad packet!\n");
233 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
237 intel_pt_dump(pt, buf, len);
240 static void intel_pt_log_event(union perf_event *event)
242 FILE *f = intel_pt_log_fp();
244 if (!intel_pt_enable_logging || !f)
247 perf_event__fprintf(event, NULL, f);
250 static void intel_pt_dump_sample(struct perf_session *session,
251 struct perf_sample *sample)
253 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
257 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
260 static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
262 struct perf_time_interval *range = pt->synth_opts.ptime_range;
263 int n = pt->synth_opts.range_num;
265 if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
268 if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
271 /* perf_time__ranges_skip_sample does not work if time is zero */
275 return !n || !perf_time__ranges_skip_sample(range, n, tm);
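/*
 * Look up the VMCS info for the given VMCS pointer in the rb tree, creating a
 * new node initialized with the default TSC offset if one does not exist yet.
 */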
278 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
282 struct rb_node **p = &rb_root->rb_node;
283 struct rb_node *parent = NULL;
284 struct intel_pt_vmcs_info *v;
288 v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
299 v = zalloc(sizeof(*v));
302 v->tsc_offset = dflt_tsc_offset;
303 v->reliable = dflt_tsc_offset;
305 rb_link_node(&v->rb_node, parent, p);
306 rb_insert_color(&v->rb_node, rb_root);
312 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
314 struct intel_pt_queue *ptq = data;
315 struct intel_pt *pt = ptq->pt;
317 if (!vmcs && !pt->dflt_tsc_offset)
320 return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
323 static void intel_pt_free_vmcs_info(struct intel_pt *pt)
325 struct intel_pt_vmcs_info *v;
328 n = rb_first(&pt->vmcs_info);
330 v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
332 rb_erase(&v->rb_node, &pt->vmcs_info);
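/*
 * Skip overlapping trace data at the start of buffer 'b' when it repeats data
 * already present at the end of buffer 'a', which can happen in snapshot or
 * sampling mode.
 */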
337 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
338 struct auxtrace_buffer *b)
340 bool consecutive = false;
343 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
344 pt->have_tsc, &consecutive,
345 pt->synth_opts.vm_time_correlation);
349 * In the case of vm_time_correlation, the overlap might contain TSC
350 * packets that will not be fixed, and that will then no longer work for
351 * overlap detection. Avoid that by zeroing out the overlap.
353 if (pt->synth_opts.vm_time_correlation)
354 memset(b->data, 0, start - b->data);
355 b->use_size = b->data + b->size - start;
357 if (b->use_size && consecutive)
358 b->consecutive = true;
362 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
363 struct auxtrace_buffer *buffer,
364 struct auxtrace_buffer *old_buffer,
365 struct intel_pt_buffer *b)
370 int fd = perf_data__fd(ptq->pt->session->data);
372 buffer->data = auxtrace_buffer__get_data(buffer, fd);
377 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
378 if (might_overlap && !buffer->consecutive && old_buffer &&
379 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
382 if (buffer->use_data) {
383 b->len = buffer->use_size;
384 b->buf = buffer->use_data;
386 b->len = buffer->size;
387 b->buf = buffer->data;
389 b->ref_timestamp = buffer->reference;
391 if (!old_buffer || (might_overlap && !buffer->consecutive)) {
392 b->consecutive = false;
393 b->trace_nr = buffer->buffer_nr + 1;
395 b->consecutive = true;
401 /* Do not drop buffers with references - refer to intel_pt_get_trace() */
402 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
403 struct auxtrace_buffer *buffer)
405 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
408 auxtrace_buffer__drop_data(buffer);
411 /* Must be serialized with respect to intel_pt_get_trace() */
412 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
415 struct intel_pt_queue *ptq = data;
416 struct auxtrace_buffer *buffer = ptq->buffer;
417 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
418 struct auxtrace_queue *queue;
421 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
424 struct intel_pt_buffer b = { .len = 0 };
426 buffer = auxtrace_buffer__next(queue, buffer);
430 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
435 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
438 intel_pt_lookahead_drop_buffer(ptq, buffer);
442 err = cb(&b, cb_data);
447 if (buffer != old_buffer)
448 intel_pt_lookahead_drop_buffer(ptq, buffer);
449 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
455 * This function assumes data is processed sequentially only.
456 * Must be serialized with respect to intel_pt_lookahead()
458 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
460 struct intel_pt_queue *ptq = data;
461 struct auxtrace_buffer *buffer = ptq->buffer;
462 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
463 struct auxtrace_queue *queue;
471 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
473 buffer = auxtrace_buffer__next(queue, buffer);
476 auxtrace_buffer__drop_data(old_buffer);
481 ptq->buffer = buffer;
483 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
487 if (ptq->step_through_buffers)
492 auxtrace_buffer__drop_data(old_buffer);
493 ptq->old_buffer = buffer;
495 auxtrace_buffer__drop_data(buffer);
496 return intel_pt_get_trace(b, data);
502 struct intel_pt_cache_entry {
503 struct auxtrace_cache_entry entry;
506 enum intel_pt_insn_op op;
507 enum intel_pt_insn_branch branch;
510 char insn[INTEL_PT_INSN_BUF_SZ];
513 static int intel_pt_config_div(const char *var, const char *value, void *data)
518 if (!strcmp(var, "intel-pt.cache-divisor")) {
519 val = strtol(value, NULL, 0);
520 if (val > 0 && val <= INT_MAX)
527 static int intel_pt_cache_divisor(void)
534 perf_config(intel_pt_config_div, &d);
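/*
 * Derive the number of hash bits for the per-dso instruction cache from the
 * dso data size divided by the configured cache divisor, capped near 1 << 21.
 */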
542 static unsigned int intel_pt_cache_size(struct dso *dso,
543 struct machine *machine)
547 size = dso__data_size(dso, machine);
548 size /= intel_pt_cache_divisor();
551 if (size > (1 << 21))
553 return 32 - __builtin_clz(size);
556 static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
557 struct machine *machine)
559 struct auxtrace_cache *c;
562 if (dso->auxtrace_cache)
563 return dso->auxtrace_cache;
565 bits = intel_pt_cache_size(dso, machine);
567 /* Ignoring cache creation failure */
568 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
570 dso->auxtrace_cache = c;
575 static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
576 u64 offset, u64 insn_cnt, u64 byte_cnt,
577 struct intel_pt_insn *intel_pt_insn)
579 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
580 struct intel_pt_cache_entry *e;
586 e = auxtrace_cache__alloc_entry(c);
590 e->insn_cnt = insn_cnt;
591 e->byte_cnt = byte_cnt;
592 e->op = intel_pt_insn->op;
593 e->branch = intel_pt_insn->branch;
594 e->length = intel_pt_insn->length;
595 e->rel = intel_pt_insn->rel;
596 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
598 err = auxtrace_cache__add(c, offset, &e->entry);
600 auxtrace_cache__free_entry(c, e);
605 static struct intel_pt_cache_entry *
606 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
608 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
613 return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
616 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
619 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
624 auxtrace_cache__remove(dso->auxtrace_cache, offset);
627 static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
629 /* Assumes 64-bit kernel */
630 return ip & (1ULL << 63);
633 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
636 return intel_pt_guest_kernel_ip(ip) ?
637 PERF_RECORD_MISC_GUEST_KERNEL :
638 PERF_RECORD_MISC_GUEST_USER;
641 return ip >= ptq->pt->kernel_start ?
642 PERF_RECORD_MISC_KERNEL :
643 PERF_RECORD_MISC_USER;
646 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
648 /* No support for non-zero CS base */
650 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
651 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
654 static int intel_pt_get_guest(struct intel_pt_queue *ptq)
656 struct machines *machines = &ptq->pt->session->machines;
657 struct machine *machine;
658 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
660 if (ptq->guest_machine && pid == ptq->guest_machine_pid)
663 ptq->guest_machine = NULL;
664 thread__zput(ptq->unknown_guest_thread);
666 machine = machines__find_guest(machines, pid);
670 ptq->unknown_guest_thread = machine__idle_thread(machine);
671 if (!ptq->unknown_guest_thread)
674 ptq->guest_machine = machine;
675 ptq->guest_machine_pid = pid;
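/*
 * Decoder callback: starting at *ip, decode instructions from the dso image
 * until a branch is found, 'to_ip' is reached, or 'max_insn_cnt' instructions
 * have been walked. Results are cached per dso offset to speed up re-walks.
 */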
680 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
681 uint64_t *insn_cnt_ptr, uint64_t *ip,
682 uint64_t to_ip, uint64_t max_insn_cnt,
685 struct intel_pt_queue *ptq = data;
686 struct machine *machine = ptq->pt->machine;
687 struct thread *thread;
688 struct addr_location al;
689 unsigned char buf[INTEL_PT_INSN_BUF_SZ];
693 u64 offset, start_offset, start_ip;
698 intel_pt_insn->length = 0;
700 if (to_ip && *ip == to_ip)
703 nr = ptq->state->to_nr;
704 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
707 if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
708 intel_pt_get_guest(ptq))
710 machine = ptq->guest_machine;
711 thread = ptq->unknown_guest_thread;
713 thread = ptq->thread;
715 if (cpumode != PERF_RECORD_MISC_KERNEL)
717 thread = ptq->pt->unknown_thread;
722 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
725 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
726 dso__data_status_seen(al.map->dso,
727 DSO_DATA_STATUS_SEEN_ITRACE))
730 offset = al.map->map_ip(al.map, *ip);
732 if (!to_ip && one_map) {
733 struct intel_pt_cache_entry *e;
735 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
737 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
738 *insn_cnt_ptr = e->insn_cnt;
740 intel_pt_insn->op = e->op;
741 intel_pt_insn->branch = e->branch;
742 intel_pt_insn->length = e->length;
743 intel_pt_insn->rel = e->rel;
744 memcpy(intel_pt_insn->buf, e->insn,
745 INTEL_PT_INSN_BUF_SZ);
746 intel_pt_log_insn_no_data(intel_pt_insn, *ip);
751 start_offset = offset;
754 /* Load maps to ensure dso->is_64_bit has been updated */
757 x86_64 = al.map->dso->is_64_bit;
760 len = dso__data_read_offset(al.map->dso, machine,
762 INTEL_PT_INSN_BUF_SZ);
766 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
769 intel_pt_log_insn(intel_pt_insn, *ip);
773 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
776 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
779 *ip += intel_pt_insn->length;
781 if (to_ip && *ip == to_ip) {
782 intel_pt_insn->length = 0;
786 if (*ip >= al.map->end)
789 offset += intel_pt_insn->length;
794 *insn_cnt_ptr = insn_cnt;
800 * Didn't look up in the 'to_ip' case, so do it now to prevent duplicate cache entries.
804 struct intel_pt_cache_entry *e;
806 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
811 /* Ignore cache errors */
812 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
813 *ip - start_ip, intel_pt_insn);
818 *insn_cnt_ptr = insn_cnt;
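/*
 * Check a TIP.PGD target address against the configured address filters to
 * decide whether tracing really stopped there (a trace stop region, or leaving
 * a filter region).
 */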
822 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
823 uint64_t offset, const char *filename)
825 struct addr_filter *filt;
826 bool have_filter = false;
827 bool hit_tracestop = false;
828 bool hit_filter = false;
830 list_for_each_entry(filt, &pt->filts.head, list) {
834 if ((filename && !filt->filename) ||
835 (!filename && filt->filename) ||
836 (filename && strcmp(filename, filt->filename)))
839 if (!(offset >= filt->addr && offset < filt->addr + filt->size))
842 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
843 ip, offset, filename ? filename : "[kernel]",
844 filt->start ? "filter" : "stop",
845 filt->addr, filt->size);
850 hit_tracestop = true;
853 if (!hit_tracestop && !hit_filter)
854 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
855 ip, offset, filename ? filename : "[kernel]");
857 return hit_tracestop || (have_filter && !hit_filter);
860 static int __intel_pt_pgd_ip(uint64_t ip, void *data)
862 struct intel_pt_queue *ptq = data;
863 struct thread *thread;
864 struct addr_location al;
868 if (ptq->state->to_nr) {
869 if (intel_pt_guest_kernel_ip(ip))
870 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
871 /* No support for decoding guest user space */
873 } else if (ip >= ptq->pt->kernel_start) {
874 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
877 cpumode = PERF_RECORD_MISC_USER;
879 thread = ptq->thread;
883 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
886 offset = al.map->map_ip(al.map, ip);
888 return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
889 al.map->dso->long_name);
892 static bool intel_pt_pgd_ip(uint64_t ip, void *data)
894 return __intel_pt_pgd_ip(ip, data) > 0;
897 static bool intel_pt_get_config(struct intel_pt *pt,
898 struct perf_event_attr *attr, u64 *config)
900 if (attr->type == pt->pmu_type) {
902 *config = attr->config;
909 static bool intel_pt_exclude_kernel(struct intel_pt *pt)
913 evlist__for_each_entry(pt->session->evlist, evsel) {
914 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
915 !evsel->core.attr.exclude_kernel)
921 static bool intel_pt_return_compression(struct intel_pt *pt)
926 if (!pt->noretcomp_bit)
929 evlist__for_each_entry(pt->session->evlist, evsel) {
930 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
931 (config & pt->noretcomp_bit))
937 static bool intel_pt_branch_enable(struct intel_pt *pt)
942 evlist__for_each_entry(pt->session->evlist, evsel) {
943 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
944 (config & 1) && !(config & 0x2000))
950 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
956 if (!pt->mtc_freq_bits)
959 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
962 evlist__for_each_entry(pt->session->evlist, evsel) {
963 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
964 return (config & pt->mtc_freq_bits) >> shift;
969 static bool intel_pt_timeless_decoding(struct intel_pt *pt)
972 bool timeless_decoding = true;
975 if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
978 evlist__for_each_entry(pt->session->evlist, evsel) {
979 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
981 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
982 if (config & pt->tsc_bit)
983 timeless_decoding = false;
988 return timeless_decoding;
991 static bool intel_pt_tracing_kernel(struct intel_pt *pt)
995 evlist__for_each_entry(pt->session->evlist, evsel) {
996 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
997 !evsel->core.attr.exclude_kernel)
1003 static bool intel_pt_have_tsc(struct intel_pt *pt)
1005 struct evsel *evsel;
1006 bool have_tsc = false;
1012 evlist__for_each_entry(pt->session->evlist, evsel) {
1013 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
1014 if (config & pt->tsc_bit)
1023 static bool intel_pt_have_mtc(struct intel_pt *pt)
1025 struct evsel *evsel;
1028 evlist__for_each_entry(pt->session->evlist, evsel) {
1029 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
1030 (config & pt->mtc_bit))
1036 static bool intel_pt_sampling_mode(struct intel_pt *pt)
1038 struct evsel *evsel;
1040 evlist__for_each_entry(pt->session->evlist, evsel) {
1041 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
1042 evsel->core.attr.aux_sample_size)
1048 static u64 intel_pt_ctl(struct intel_pt *pt)
1050 struct evsel *evsel;
1053 evlist__for_each_entry(pt->session->evlist, evsel) {
1054 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
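/*
 * Convert nanoseconds to TSC ticks using the recorded time conversion
 * parameters, i.e. roughly ticks = (ns << time_shift) / time_mult, computed as
 * quotient and remainder to avoid overflow.
 */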
1060 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
1064 quot = ns / pt->tc.time_mult;
1065 rem = ns % pt->tc.time_mult;
1066 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
1070 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
1072 size_t sz = sizeof(struct ip_callchain);
1074 /* Add 1 to callchain_sz for callchain context */
1075 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
1079 static int intel_pt_callchain_init(struct intel_pt *pt)
1081 struct evsel *evsel;
1083 evlist__for_each_entry(pt->session->evlist, evsel) {
1084 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
1085 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
1088 pt->chain = intel_pt_alloc_chain(pt);
1095 static void intel_pt_add_callchain(struct intel_pt *pt,
1096 struct perf_sample *sample)
1098 struct thread *thread = machine__findnew_thread(pt->machine,
1102 thread_stack__sample_late(thread, sample->cpu, pt->chain,
1103 pt->synth_opts.callchain_sz + 1, sample->ip,
1106 sample->callchain = pt->chain;
1109 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
1111 size_t sz = sizeof(struct branch_stack);
1113 sz += entry_cnt * sizeof(struct branch_entry);
1117 static int intel_pt_br_stack_init(struct intel_pt *pt)
1119 struct evsel *evsel;
1121 evlist__for_each_entry(pt->session->evlist, evsel) {
1122 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
1123 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
1126 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
1133 static void intel_pt_add_br_stack(struct intel_pt *pt,
1134 struct perf_sample *sample)
1136 struct thread *thread = machine__findnew_thread(pt->machine,
1140 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
1141 pt->br_stack_sz, sample->ip,
1144 sample->branch_stack = pt->br_stack;
1147 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1148 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
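/*
 * Allocate a per-queue decode context: callchain and branch stack buffers as
 * requested by the synthesis options, an event buffer for synthesized samples,
 * and a decoder configured from the recorded PMU settings.
 */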
1150 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
1151 unsigned int queue_nr)
1153 struct intel_pt_params params = { .get_trace = 0, };
1154 struct perf_env *env = pt->machine->env;
1155 struct intel_pt_queue *ptq;
1157 ptq = zalloc(sizeof(struct intel_pt_queue));
1161 if (pt->synth_opts.callchain) {
1162 ptq->chain = intel_pt_alloc_chain(pt);
1167 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
1168 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
1170 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1171 if (!ptq->last_branch)
1175 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1176 if (!ptq->event_buf)
1180 ptq->queue_nr = queue_nr;
1181 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1187 params.get_trace = intel_pt_get_trace;
1188 params.walk_insn = intel_pt_walk_next_insn;
1189 params.lookahead = intel_pt_lookahead;
1190 params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
1192 params.return_compression = intel_pt_return_compression(pt);
1193 params.branch_enable = intel_pt_branch_enable(pt);
1194 params.ctl = intel_pt_ctl(pt);
1195 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
1196 params.mtc_period = intel_pt_mtc_period(pt);
1197 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1198 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
1199 params.quick = pt->synth_opts.quick;
1200 params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
1201 params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
1202 params.first_timestamp = pt->first_timestamp;
1204 if (pt->filts.cnt > 0)
1205 params.pgd_ip = intel_pt_pgd_ip;
1207 if (pt->synth_opts.instructions) {
1208 if (pt->synth_opts.period) {
1209 switch (pt->synth_opts.period_type) {
1210 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1211 params.period_type =
1212 INTEL_PT_PERIOD_INSTRUCTIONS;
1213 params.period = pt->synth_opts.period;
1215 case PERF_ITRACE_PERIOD_TICKS:
1216 params.period_type = INTEL_PT_PERIOD_TICKS;
1217 params.period = pt->synth_opts.period;
1219 case PERF_ITRACE_PERIOD_NANOSECS:
1220 params.period_type = INTEL_PT_PERIOD_TICKS;
1221 params.period = intel_pt_ns_to_ticks(pt,
1222 pt->synth_opts.period);
1229 if (!params.period) {
1230 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
1235 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1236 params.flags |= INTEL_PT_FUP_WITH_NLIP;
1238 ptq->decoder = intel_pt_decoder_new(¶ms);
1245 zfree(&ptq->event_buf);
1246 zfree(&ptq->last_branch);
1252 static void intel_pt_free_queue(void *priv)
1254 struct intel_pt_queue *ptq = priv;
1258 thread__zput(ptq->thread);
1259 thread__zput(ptq->unknown_guest_thread);
1260 intel_pt_decoder_free(ptq->decoder);
1261 zfree(&ptq->event_buf);
1262 zfree(&ptq->last_branch);
1267 static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
1271 pt->first_timestamp = timestamp;
1273 for (i = 0; i < pt->queues.nr_queues; i++) {
1274 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1275 struct intel_pt_queue *ptq = queue->priv;
1277 if (ptq && ptq->decoder)
1278 intel_pt_set_first_timestamp(ptq->decoder, timestamp);
1282 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1283 struct auxtrace_queue *queue)
1285 struct intel_pt_queue *ptq = queue->priv;
1287 if (queue->tid == -1 || pt->have_sched_switch) {
1288 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1291 thread__zput(ptq->thread);
1294 if (!ptq->thread && ptq->tid != -1)
1295 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1298 ptq->pid = ptq->thread->pid_;
1299 if (queue->cpu == -1)
1300 ptq->cpu = ptq->thread->cpu;
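/*
 * Derive perf branch flags from the current decoder state: transaction aborts,
 * asynchronous branches (interrupts, VM exits, trace end), trace begin/end and
 * in-transaction status.
 */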
1304 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1307 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1308 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1309 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1310 if (!ptq->state->to_ip)
1311 ptq->flags = PERF_IP_FLAG_BRANCH |
1312 PERF_IP_FLAG_TRACE_END;
1313 else if (ptq->state->from_nr && !ptq->state->to_nr)
1314 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1315 PERF_IP_FLAG_VMEXIT;
1317 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1318 PERF_IP_FLAG_ASYNC |
1319 PERF_IP_FLAG_INTERRUPT;
1321 if (ptq->state->from_ip)
1322 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1324 ptq->flags = PERF_IP_FLAG_BRANCH |
1325 PERF_IP_FLAG_TRACE_BEGIN;
1326 if (ptq->state->flags & INTEL_PT_IN_TX)
1327 ptq->flags |= PERF_IP_FLAG_IN_TX;
1328 ptq->insn_len = ptq->state->insn_len;
1329 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1332 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1333 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1334 if (ptq->state->type & INTEL_PT_TRACE_END)
1335 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1338 static void intel_pt_setup_time_range(struct intel_pt *pt,
1339 struct intel_pt_queue *ptq)
1344 ptq->sel_timestamp = pt->time_ranges[0].start;
1347 if (ptq->sel_timestamp) {
1348 ptq->sel_start = true;
1350 ptq->sel_timestamp = pt->time_ranges[0].end;
1351 ptq->sel_start = false;
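/*
 * Create the queue's decode context if necessary and, unless decoding is
 * timeless, decode far enough to obtain an initial timestamp so the queue can
 * be added to the processing heap.
 */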
1355 static int intel_pt_setup_queue(struct intel_pt *pt,
1356 struct auxtrace_queue *queue,
1357 unsigned int queue_nr)
1359 struct intel_pt_queue *ptq = queue->priv;
1361 if (list_empty(&queue->head))
1365 ptq = intel_pt_alloc_queue(pt, queue_nr);
1370 if (queue->cpu != -1)
1371 ptq->cpu = queue->cpu;
1372 ptq->tid = queue->tid;
1374 ptq->cbr_seen = UINT_MAX;
1376 if (pt->sampling_mode && !pt->snapshot_mode &&
1377 pt->timeless_decoding)
1378 ptq->step_through_buffers = true;
1380 ptq->sync_switch = pt->sync_switch;
1382 intel_pt_setup_time_range(pt, ptq);
1385 if (!ptq->on_heap &&
1386 (!ptq->sync_switch ||
1387 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1388 const struct intel_pt_state *state;
1391 if (pt->timeless_decoding)
1394 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1395 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1396 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1398 if (ptq->sel_start && ptq->sel_timestamp) {
1399 ret = intel_pt_fast_forward(ptq->decoder,
1400 ptq->sel_timestamp);
1406 state = intel_pt_decode(ptq->decoder);
1408 if (state->err == INTEL_PT_ERR_NODATA) {
1409 intel_pt_log("queue %u has no timestamp\n",
1415 if (state->timestamp)
1419 ptq->timestamp = state->timestamp;
1420 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1421 queue_nr, ptq->timestamp);
1423 ptq->have_sample = true;
1424 if (ptq->sel_start && ptq->sel_timestamp &&
1425 ptq->timestamp < ptq->sel_timestamp)
1426 ptq->have_sample = false;
1427 intel_pt_sample_flags(ptq);
1428 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1431 ptq->on_heap = true;
1437 static int intel_pt_setup_queues(struct intel_pt *pt)
1442 for (i = 0; i < pt->queues.nr_queues; i++) {
1443 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1450 static inline bool intel_pt_skip_event(struct intel_pt *pt)
1452 return pt->synth_opts.initial_skip &&
1453 pt->num_events++ < pt->synth_opts.initial_skip;
1457 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1458 * Also ensure CBR is the first non-skipped event by allowing for 4 more samples
1459 * from this decoder state.
1461 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1463 return pt->synth_opts.initial_skip &&
1464 pt->num_events + 4 < pt->synth_opts.initial_skip;
1467 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1468 union perf_event *event,
1469 struct perf_sample *sample)
1471 event->sample.header.type = PERF_RECORD_SAMPLE;
1472 event->sample.header.size = sizeof(struct perf_event_header);
1474 sample->pid = ptq->pid;
1475 sample->tid = ptq->tid;
1476 sample->cpu = ptq->cpu;
1477 sample->insn_len = ptq->insn_len;
1478 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1481 static void intel_pt_prep_b_sample(struct intel_pt *pt,
1482 struct intel_pt_queue *ptq,
1483 union perf_event *event,
1484 struct perf_sample *sample)
1486 intel_pt_prep_a_sample(ptq, event, sample);
1488 if (!pt->timeless_decoding)
1489 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1491 sample->ip = ptq->state->from_ip;
1492 sample->addr = ptq->state->to_ip;
1493 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1495 sample->flags = ptq->flags;
1497 event->sample.header.misc = sample->cpumode;
1500 static int intel_pt_inject_event(union perf_event *event,
1501 struct perf_sample *sample, u64 type)
1503 event->header.size = perf_event__sample_event_size(sample, type, 0);
1504 return perf_event__synthesize_sample(event, type, 0, sample);
1507 static inline int intel_pt_opt_inject(struct intel_pt *pt,
1508 union perf_event *event,
1509 struct perf_sample *sample, u64 type)
1511 if (!pt->synth_opts.inject)
1514 return intel_pt_inject_event(event, sample, type);
1517 static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1518 union perf_event *event,
1519 struct perf_sample *sample, u64 type)
1523 ret = intel_pt_opt_inject(pt, event, sample, type);
1527 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1529 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1534 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1536 struct intel_pt *pt = ptq->pt;
1537 union perf_event *event = ptq->event_buf;
1538 struct perf_sample sample = { .ip = 0, };
1539 struct dummy_branch_stack {
1542 struct branch_entry entries;
1545 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1548 if (intel_pt_skip_event(pt))
1551 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1553 sample.id = ptq->pt->branches_id;
1554 sample.stream_id = ptq->pt->branches_id;
1557 * perf report cannot handle events without a branch stack when using
1558 * SORT_MODE__BRANCH so make a dummy one.
1560 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1561 dummy_bs = (struct dummy_branch_stack){
1569 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1572 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
1573 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1574 if (sample.cyc_cnt) {
1575 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1576 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1577 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1580 return intel_pt_deliver_synth_event(pt, event, &sample,
1581 pt->branches_sample_type);
1584 static void intel_pt_prep_sample(struct intel_pt *pt,
1585 struct intel_pt_queue *ptq,
1586 union perf_event *event,
1587 struct perf_sample *sample)
1589 intel_pt_prep_b_sample(pt, ptq, event, sample);
1591 if (pt->synth_opts.callchain) {
1592 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1593 pt->synth_opts.callchain_sz + 1,
1594 sample->ip, pt->kernel_start);
1595 sample->callchain = ptq->chain;
1598 if (pt->synth_opts.last_branch) {
1599 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1601 sample->branch_stack = ptq->last_branch;
1605 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1607 struct intel_pt *pt = ptq->pt;
1608 union perf_event *event = ptq->event_buf;
1609 struct perf_sample sample = { .ip = 0, };
1611 if (intel_pt_skip_event(pt))
1614 intel_pt_prep_sample(pt, ptq, event, &sample);
1616 sample.id = ptq->pt->instructions_id;
1617 sample.stream_id = ptq->pt->instructions_id;
1618 if (pt->synth_opts.quick)
1621 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1623 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
1624 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1625 if (sample.cyc_cnt) {
1626 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1627 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1628 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1631 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1633 return intel_pt_deliver_synth_event(pt, event, &sample,
1634 pt->instructions_sample_type);
1637 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1639 struct intel_pt *pt = ptq->pt;
1640 union perf_event *event = ptq->event_buf;
1641 struct perf_sample sample = { .ip = 0, };
1643 if (intel_pt_skip_event(pt))
1646 intel_pt_prep_sample(pt, ptq, event, &sample);
1648 sample.id = ptq->pt->transactions_id;
1649 sample.stream_id = ptq->pt->transactions_id;
1651 return intel_pt_deliver_synth_event(pt, event, &sample,
1652 pt->transactions_sample_type);
1655 static void intel_pt_prep_p_sample(struct intel_pt *pt,
1656 struct intel_pt_queue *ptq,
1657 union perf_event *event,
1658 struct perf_sample *sample)
1660 intel_pt_prep_sample(pt, ptq, event, sample);
1663 * Zero IP is used to mean "trace start" but that is not the case for
1664 * power or PTWRITE events with no IP, so clear the flags.
1670 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1672 struct intel_pt *pt = ptq->pt;
1673 union perf_event *event = ptq->event_buf;
1674 struct perf_sample sample = { .ip = 0, };
1675 struct perf_synth_intel_ptwrite raw;
1677 if (intel_pt_skip_event(pt))
1680 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1682 sample.id = ptq->pt->ptwrites_id;
1683 sample.stream_id = ptq->pt->ptwrites_id;
1686 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1687 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1689 sample.raw_size = perf_synth__raw_size(raw);
1690 sample.raw_data = perf_synth__raw_data(&raw);
1692 return intel_pt_deliver_synth_event(pt, event, &sample,
1693 pt->ptwrites_sample_type);
1696 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1698 struct intel_pt *pt = ptq->pt;
1699 union perf_event *event = ptq->event_buf;
1700 struct perf_sample sample = { .ip = 0, };
1701 struct perf_synth_intel_cbr raw;
1704 if (intel_pt_skip_cbr_event(pt))
1707 ptq->cbr_seen = ptq->state->cbr;
1709 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1711 sample.id = ptq->pt->cbr_id;
1712 sample.stream_id = ptq->pt->cbr_id;
1714 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1715 raw.flags = cpu_to_le32(flags);
1716 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1719 sample.raw_size = perf_synth__raw_size(raw);
1720 sample.raw_data = perf_synth__raw_data(&raw);
1722 return intel_pt_deliver_synth_event(pt, event, &sample,
1723 pt->pwr_events_sample_type);
1726 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1728 struct intel_pt *pt = ptq->pt;
1729 union perf_event *event = ptq->event_buf;
1730 struct perf_sample sample = { .ip = 0, };
1731 struct perf_synth_intel_psb raw;
1733 if (intel_pt_skip_event(pt))
1736 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1738 sample.id = ptq->pt->psb_id;
1739 sample.stream_id = ptq->pt->psb_id;
1743 raw.offset = ptq->state->psb_offset;
1745 sample.raw_size = perf_synth__raw_size(raw);
1746 sample.raw_data = perf_synth__raw_data(&raw);
1748 return intel_pt_deliver_synth_event(pt, event, &sample,
1749 pt->pwr_events_sample_type);
1752 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1754 struct intel_pt *pt = ptq->pt;
1755 union perf_event *event = ptq->event_buf;
1756 struct perf_sample sample = { .ip = 0, };
1757 struct perf_synth_intel_mwait raw;
1759 if (intel_pt_skip_event(pt))
1762 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1764 sample.id = ptq->pt->mwait_id;
1765 sample.stream_id = ptq->pt->mwait_id;
1768 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1770 sample.raw_size = perf_synth__raw_size(raw);
1771 sample.raw_data = perf_synth__raw_data(&raw);
1773 return intel_pt_deliver_synth_event(pt, event, &sample,
1774 pt->pwr_events_sample_type);
1777 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1779 struct intel_pt *pt = ptq->pt;
1780 union perf_event *event = ptq->event_buf;
1781 struct perf_sample sample = { .ip = 0, };
1782 struct perf_synth_intel_pwre raw;
1784 if (intel_pt_skip_event(pt))
1787 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1789 sample.id = ptq->pt->pwre_id;
1790 sample.stream_id = ptq->pt->pwre_id;
1793 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1795 sample.raw_size = perf_synth__raw_size(raw);
1796 sample.raw_data = perf_synth__raw_data(&raw);
1798 return intel_pt_deliver_synth_event(pt, event, &sample,
1799 pt->pwr_events_sample_type);
1802 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1804 struct intel_pt *pt = ptq->pt;
1805 union perf_event *event = ptq->event_buf;
1806 struct perf_sample sample = { .ip = 0, };
1807 struct perf_synth_intel_exstop raw;
1809 if (intel_pt_skip_event(pt))
1812 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1814 sample.id = ptq->pt->exstop_id;
1815 sample.stream_id = ptq->pt->exstop_id;
1818 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1820 sample.raw_size = perf_synth__raw_size(raw);
1821 sample.raw_data = perf_synth__raw_data(&raw);
1823 return intel_pt_deliver_synth_event(pt, event, &sample,
1824 pt->pwr_events_sample_type);
1827 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1829 struct intel_pt *pt = ptq->pt;
1830 union perf_event *event = ptq->event_buf;
1831 struct perf_sample sample = { .ip = 0, };
1832 struct perf_synth_intel_pwrx raw;
1834 if (intel_pt_skip_event(pt))
1837 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1839 sample.id = ptq->pt->pwrx_id;
1840 sample.stream_id = ptq->pt->pwrx_id;
1843 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1845 sample.raw_size = perf_synth__raw_size(raw);
1846 sample.raw_data = perf_synth__raw_data(&raw);
1848 return intel_pt_deliver_synth_event(pt, event, &sample,
1849 pt->pwr_events_sample_type);
1853 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer to
1854 * intel_pt_add_gp_regs().
1856 static const int pebs_gp_regs[] = {
1857 [PERF_REG_X86_FLAGS] = 1,
1858 [PERF_REG_X86_IP] = 2,
1859 [PERF_REG_X86_AX] = 3,
1860 [PERF_REG_X86_CX] = 4,
1861 [PERF_REG_X86_DX] = 5,
1862 [PERF_REG_X86_BX] = 6,
1863 [PERF_REG_X86_SP] = 7,
1864 [PERF_REG_X86_BP] = 8,
1865 [PERF_REG_X86_SI] = 9,
1866 [PERF_REG_X86_DI] = 10,
1867 [PERF_REG_X86_R8] = 11,
1868 [PERF_REG_X86_R9] = 12,
1869 [PERF_REG_X86_R10] = 13,
1870 [PERF_REG_X86_R11] = 14,
1871 [PERF_REG_X86_R12] = 15,
1872 [PERF_REG_X86_R13] = 16,
1873 [PERF_REG_X86_R14] = 17,
1874 [PERF_REG_X86_R15] = 18,
1877 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1878 const struct intel_pt_blk_items *items,
1881 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1882 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1886 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1887 /* Get the PEBS gp_regs array index */
1888 int n = pebs_gp_regs[i] - 1;
1893 * Add only registers that were requested (i.e. 'regs_mask') and
1894 * that were provided (i.e. 'mask'), and update the resulting
1895 * mask (i.e. 'intr_regs->mask') accordingly.
1897 if (mask & 1 << n && regs_mask & bit) {
1898 intr_regs->mask |= bit;
1899 *pos++ = gp_regs[n];
1906 #ifndef PERF_REG_X86_XMM0
1907 #define PERF_REG_X86_XMM0 32
1910 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1911 const struct intel_pt_blk_items *items,
1914 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1915 const u64 *xmm = items->xmm;
1918 * If there are any XMM registers, then there should be all of them.
1919 * Nevertheless, follow the logic to add only registers that were
1920 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1921 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1923 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1925 for (; mask; mask >>= 1, xmm++) {
1931 #define LBR_INFO_MISPRED (1ULL << 63)
1932 #define LBR_INFO_IN_TX (1ULL << 62)
1933 #define LBR_INFO_ABORT (1ULL << 61)
1934 #define LBR_INFO_CYCLES 0xffff
1936 /* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
1937 static u64 intel_pt_lbr_flags(u64 info)
1940 struct branch_flags flags;
1945 u.flags.mispred = !!(info & LBR_INFO_MISPRED);
1946 u.flags.predicted = !(info & LBR_INFO_MISPRED);
1947 u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
1948 u.flags.abort = !!(info & LBR_INFO_ABORT);
1949 u.flags.cycles = info & LBR_INFO_CYCLES;
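/*
 * Convert groups of three LBR block item values (from, to, info) into perf
 * branch_stack entries, translating the info word into branch flags.
 */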
1954 static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1955 const struct intel_pt_blk_items *items)
1962 to = &br_stack->entries[0].from;
1964 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1965 u32 mask = items->mask[i];
1966 const u64 *from = items->val[i];
1968 for (; mask; mask >>= 3, from += 3) {
1969 if ((mask & 7) == 7) {
1972 *to++ = intel_pt_lbr_flags(from[2]);
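/*
 * Synthesize a sample from a PEBS record carried in the trace as Block Items:
 * IP, time, callchain, interrupt regs, LBRs, data address, weight and
 * transaction information, according to the PEBS event's sample_type.
 */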
1979 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
1981 const struct intel_pt_blk_items *items = &ptq->state->items;
1982 struct perf_sample sample = { .ip = 0, };
1983 union perf_event *event = ptq->event_buf;
1984 struct intel_pt *pt = ptq->pt;
1985 struct evsel *evsel = pt->pebs_evsel;
1986 u64 sample_type = evsel->core.attr.sample_type;
1987 u64 id = evsel->core.id[0];
1989 u64 regs[8 * sizeof(sample.intr_regs.mask)];
1991 if (intel_pt_skip_event(pt))
1994 intel_pt_prep_a_sample(ptq, event, &sample);
1997 sample.stream_id = id;
1999 if (!evsel->core.attr.freq)
2000 sample.period = evsel->core.attr.sample_period;
2002 /* No support for non-zero CS base */
2004 sample.ip = items->ip;
2005 else if (items->has_rip)
2006 sample.ip = items->rip;
2008 sample.ip = ptq->state->from_ip;
2010 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2012 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2014 sample.cpumode = cpumode;
2016 if (sample_type & PERF_SAMPLE_TIME) {
2019 if (items->has_timestamp)
2020 timestamp = items->timestamp;
2021 else if (!pt->timeless_decoding)
2022 timestamp = ptq->timestamp;
2024 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2027 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2028 pt->synth_opts.callchain) {
2029 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2030 pt->synth_opts.callchain_sz, sample.ip,
2032 sample.callchain = ptq->chain;
2035 if (sample_type & PERF_SAMPLE_REGS_INTR &&
2036 (items->mask[INTEL_PT_GP_REGS_POS] ||
2037 items->mask[INTEL_PT_XMM_POS])) {
2038 u64 regs_mask = evsel->core.attr.sample_regs_intr;
2041 sample.intr_regs.abi = items->is_32_bit ?
2042 PERF_SAMPLE_REGS_ABI_32 :
2043 PERF_SAMPLE_REGS_ABI_64;
2044 sample.intr_regs.regs = regs;
2046 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2048 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2051 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2052 if (items->mask[INTEL_PT_LBR_0_POS] ||
2053 items->mask[INTEL_PT_LBR_1_POS] ||
2054 items->mask[INTEL_PT_LBR_2_POS]) {
2055 intel_pt_add_lbrs(ptq->last_branch, items);
2056 } else if (pt->synth_opts.last_branch) {
2057 thread_stack__br_sample(ptq->thread, ptq->cpu,
2061 ptq->last_branch->nr = 0;
2063 sample.branch_stack = ptq->last_branch;
2066 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2067 sample.addr = items->mem_access_address;
2069 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2071 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2072 * intel_hsw_weight().
2074 if (items->has_mem_access_latency) {
2075 u64 weight = items->mem_access_latency >> 32;
2078 * Starting from SPR, the mem access latency field
2079 * contains both cache latency [47:32] and instruction
2080 * latency [15:0]. The cache latency is the same as the
2081 * mem access latency on previous platforms.
2083 * In practice, no memory access could last longer than 4G
2084 * cycles. Use latency >> 32 to distinguish the
2085 * different format of the mem access latency field.
2088 sample.weight = weight & 0xffff;
2089 sample.ins_lat = items->mem_access_latency & 0xffff;
2091 sample.weight = items->mem_access_latency;
2093 if (!sample.weight && items->has_tsx_aux_info) {
2094 /* Cycles last block */
2095 sample.weight = (u32)items->tsx_aux_info;
2099 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2100 u64 ax = items->has_rax ? items->rax : 0;
2101 /* Refer to the kernel's intel_hsw_transaction() */
2102 u64 txn = (u8)(items->tsx_aux_info >> 32);
2104 /* For RTM XABORTs also log the abort code from AX */
2105 if (txn & PERF_TXN_TRANSACTION && ax & 1)
2106 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2107 sample.transaction = txn;
2110 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2113 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2114 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
2116 union perf_event event;
2117 char msg[MAX_AUXTRACE_ERROR_MSG];
2120 if (pt->synth_opts.error_minus_flags) {
2121 if (code == INTEL_PT_ERR_OVR &&
2122 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2124 if (code == INTEL_PT_ERR_LOST &&
2125 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2129 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2131 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2132 code, cpu, pid, tid, ip, msg, timestamp);
2134 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2136 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2142 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2143 const struct intel_pt_state *state)
2145 struct intel_pt *pt = ptq->pt;
2146 u64 tm = ptq->timestamp;
2148 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2150 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
2151 ptq->tid, state->from_ip, tm);
2154 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2156 struct auxtrace_queue *queue;
2157 pid_t tid = ptq->next_tid;
2163 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2165 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2167 queue = &pt->queues.queue_array[ptq->queue_nr];
2168 intel_pt_set_pid_tid_cpu(pt, queue);
2175 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2177 struct intel_pt *pt = ptq->pt;
2179 return ip == pt->switch_ip &&
2180 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2181 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2182 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2185 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2186 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
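/*
 * Synthesize all samples implied by the current decoder state (PEBS, power
 * events, instructions, transactions, ptwrites, branches), then update the
 * sync_switch state machine.
 */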
2188 static int intel_pt_sample(struct intel_pt_queue *ptq)
2190 const struct intel_pt_state *state = ptq->state;
2191 struct intel_pt *pt = ptq->pt;
2194 if (!ptq->have_sample)
2197 ptq->have_sample = false;
2199 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2200 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2203 * Do PEBS first to allow for the possibility that the PEBS timestamp
2204 * precedes the current timestamp.
2206 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2207 err = intel_pt_synth_pebs_sample(ptq);
2212 if (pt->sample_pwr_events) {
2213 if (state->type & INTEL_PT_PSB_EVT) {
2214 err = intel_pt_synth_psb_sample(ptq);
2218 if (ptq->state->cbr != ptq->cbr_seen) {
2219 err = intel_pt_synth_cbr_sample(ptq);
2223 if (state->type & INTEL_PT_PWR_EVT) {
2224 if (state->type & INTEL_PT_MWAIT_OP) {
2225 err = intel_pt_synth_mwait_sample(ptq);
2229 if (state->type & INTEL_PT_PWR_ENTRY) {
2230 err = intel_pt_synth_pwre_sample(ptq);
2234 if (state->type & INTEL_PT_EX_STOP) {
2235 err = intel_pt_synth_exstop_sample(ptq);
2239 if (state->type & INTEL_PT_PWR_EXIT) {
2240 err = intel_pt_synth_pwrx_sample(ptq);
2247 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
2248 err = intel_pt_synth_instruction_sample(ptq);
2253 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2254 err = intel_pt_synth_transaction_sample(ptq);
2259 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2260 err = intel_pt_synth_ptwrite_sample(ptq);
2265 if (!(state->type & INTEL_PT_BRANCH))
2268 if (pt->use_thread_stack) {
2269 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2270 state->from_ip, state->to_ip, ptq->insn_len,
2271 state->trace_nr, pt->callstack,
2272 pt->br_stack_sz_plus,
2275 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2278 if (pt->sample_branches) {
2279 if (state->from_nr != state->to_nr &&
2280 state->from_ip && state->to_ip) {
2281 struct intel_pt_state *st = (struct intel_pt_state *)state;
2282 u64 to_ip = st->to_ip;
2283 u64 from_ip = st->from_ip;
2286 * perf cannot handle having different machines for ip
2287 * and addr, so create 2 branches.
2290 err = intel_pt_synth_branch_sample(ptq);
2295 err = intel_pt_synth_branch_sample(ptq);
2296 st->from_ip = from_ip;
2298 err = intel_pt_synth_branch_sample(ptq);
2304 if (!ptq->sync_switch)
2307 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2308 switch (ptq->switch_state) {
2309 case INTEL_PT_SS_NOT_TRACING:
2310 case INTEL_PT_SS_UNKNOWN:
2311 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2312 err = intel_pt_next_tid(pt, ptq);
2315 ptq->switch_state = INTEL_PT_SS_TRACING;
2318 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2321 } else if (!state->to_ip) {
2322 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2323 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2324 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2325 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2326 state->to_ip == pt->ptss_ip &&
2327 (ptq->flags & PERF_IP_FLAG_CALL)) {
2328 ptq->switch_state = INTEL_PT_SS_TRACING;
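/*
 * Find the kernel's __switch_to address, used to recognize context switches in
 * the trace, and the address (ptss_ip) of perf_trace_sched_switch or
 * __perf_event_task_sched_out, depending on how switch events were recorded.
 */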
2334 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2336 struct machine *machine = pt->machine;
2338 struct symbol *sym, *start;
2339 u64 ip, switch_ip = 0;
2345 map = machine__kernel_map(machine);
2352 start = dso__first_symbol(map->dso);
2354 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2355 if (sym->binding == STB_GLOBAL &&
2356 !strcmp(sym->name, "__switch_to")) {
2357 ip = map->unmap_ip(map, sym->start);
2358 if (ip >= map->start && ip < map->end) {
2365 if (!switch_ip || !ptss_ip)
2368 if (pt->have_sched_switch == 1)
2369 ptss = "perf_trace_sched_switch";
2371 ptss = "__perf_event_task_sched_out";
2373 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2374 if (!strcmp(sym->name, ptss)) {
2375 ip = map->unmap_ip(map, sym->start);
2376 if (ip >= map->start && ip < map->end) {
2386 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2390 pt->sync_switch = true;
2392 for (i = 0; i < pt->queues.nr_queues; i++) {
2393 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2394 struct intel_pt_queue *ptq = queue->priv;
2397 ptq->sync_switch = true;
2402 * To filter against time ranges, it is only necessary to look at the next start or end time.
2405 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2407 struct intel_pt *pt = ptq->pt;
2409 if (ptq->sel_start) {
2410 /* Next time is an end time */
2411 ptq->sel_start = false;
2412 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2414 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2415 /* Next time is a start time */
2416 ptq->sel_start = true;
2418 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2426 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2431 if (ptq->sel_start) {
2432 if (ptq->timestamp >= ptq->sel_timestamp) {
2433 /* After start time, so consider next time */
2434 intel_pt_next_time(ptq);
2435 if (!ptq->sel_timestamp) {
2439 /* Check against end time */
2442 /* Before start time, so fast forward */
2443 ptq->have_sample = false;
2444 if (ptq->sel_timestamp > *ff_timestamp) {
2445 if (ptq->sync_switch) {
2446 intel_pt_next_tid(ptq->pt, ptq);
2447 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2449 *ff_timestamp = ptq->sel_timestamp;
2450 err = intel_pt_fast_forward(ptq->decoder,
2451 ptq->sel_timestamp);
2456 } else if (ptq->timestamp > ptq->sel_timestamp) {
2457 /* After end time, so consider next time */
2458 if (!intel_pt_next_time(ptq)) {
2459 /* No next time range, so stop decoding */
2460 ptq->have_sample = false;
2461 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2464 /* Check against next start time */
2467 /* Before end time */
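/*
 * Decode one queue until its timestamp passes *timestamp: synthesize samples
 * for each decoder state, report decode errors, and keep the queue timestamp
 * monotonic, using estimated TSC values on return to user space or in an
 * unknown switch state.
 */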
2473 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2475 const struct intel_pt_state *state = ptq->state;
2476 struct intel_pt *pt = ptq->pt;
2477 u64 ff_timestamp = 0;
2480 if (!pt->kernel_start) {
2481 pt->kernel_start = machine__kernel_start(pt->machine);
2482 if (pt->per_cpu_mmaps &&
2483 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2484 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2485 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2486 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2487 if (pt->switch_ip) {
2488 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2489 pt->switch_ip, pt->ptss_ip);
2490 intel_pt_enable_sync_switch(pt);
2495 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2496 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2498 err = intel_pt_sample(ptq);
2502 state = intel_pt_decode(ptq->decoder);
2504 if (state->err == INTEL_PT_ERR_NODATA)
2506 if (ptq->sync_switch &&
2507 state->from_ip >= pt->kernel_start) {
2508 ptq->sync_switch = false;
2509 intel_pt_next_tid(pt, ptq);
2511 if (pt->synth_opts.errors) {
2512 err = intel_ptq_synth_error(ptq, state);
2520 ptq->have_sample = true;
2521 intel_pt_sample_flags(ptq);
2523 /* Use estimated TSC upon return to user space */
2525 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2526 state->to_ip && state->to_ip < pt->kernel_start) {
2527 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2528 state->timestamp, state->est_timestamp);
2529 ptq->timestamp = state->est_timestamp;
2530 /* Use estimated TSC in unknown switch state */
2531 } else if (ptq->sync_switch &&
2532 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2533 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2534 ptq->next_tid == -1) {
2535 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2536 state->timestamp, state->est_timestamp);
2537 ptq->timestamp = state->est_timestamp;
2538 } else if (state->timestamp > ptq->timestamp) {
2539 ptq->timestamp = state->timestamp;
2542 if (ptq->sel_timestamp) {
2543 err = intel_pt_time_filter(ptq, &ff_timestamp);
2548 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2549 *timestamp = ptq->timestamp;
2556 static inline int intel_pt_update_queues(struct intel_pt *pt)
2558 if (pt->queues.new_data) {
2559 pt->queues.new_data = false;
2560 return intel_pt_setup_queues(pt);
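/*
 * Process queues in timestamp order: pop the queue with the smallest timestamp
 * from the heap, decode it up to the next queue's timestamp, then re-add it at
 * its new position.
 */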
2565 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2567 unsigned int queue_nr;
2572 struct auxtrace_queue *queue;
2573 struct intel_pt_queue *ptq;
2575 if (!pt->heap.heap_cnt)
2578 if (pt->heap.heap_array[0].ordinal >= timestamp)
2581 queue_nr = pt->heap.heap_array[0].queue_nr;
2582 queue = &pt->queues.queue_array[queue_nr];
2585 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2586 queue_nr, pt->heap.heap_array[0].ordinal,
2589 auxtrace_heap__pop(&pt->heap);
2591 if (pt->heap.heap_cnt) {
2592 ts = pt->heap.heap_array[0].ordinal + 1;
2599 intel_pt_set_pid_tid_cpu(pt, queue);
2601 ret = intel_pt_run_decoder(ptq, &ts);
2604 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2609 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2613 ptq->on_heap = false;
2620 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2623 struct auxtrace_queues *queues = &pt->queues;
2627 for (i = 0; i < queues->nr_queues; i++) {
2628 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2629 struct intel_pt_queue *ptq = queue->priv;
2631 if (ptq && (tid == -1 || ptq->tid == tid)) {
2633 intel_pt_set_pid_tid_cpu(pt, queue);
2634 intel_pt_run_decoder(ptq, &ts);
2640 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2641 struct auxtrace_queue *queue,
2642 struct perf_sample *sample)
2644 struct machine *m = ptq->pt->machine;
2646 ptq->pid = sample->pid;
2647 ptq->tid = sample->tid;
2648 ptq->cpu = queue->cpu;
2650 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2651 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2653 thread__zput(ptq->thread);
2658 if (ptq->pid == -1) {
2659 ptq->thread = machine__find_thread(m, -1, ptq->tid);
2661 ptq->pid = ptq->thread->pid_;
2665 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
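/*
 * Sampling mode without timestamps: find the queue that the AUX sample
 * belongs to, take pid/tid/cpu from the sample itself, and decode it
 * immediately.
 */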
2668 static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2669 struct perf_sample *sample)
2671 struct auxtrace_queue *queue;
2672 struct intel_pt_queue *ptq;
2675 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2684 ptq->time = sample->time;
2685 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2686 intel_pt_run_decoder(ptq, &ts);
2690 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2692 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2693 sample->pid, sample->tid, 0, sample->time);
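/*
 * Find the queue that is decoding the given CPU.  Queue numbers usually
 * correspond to CPU numbers, so try that index first, then search backwards
 * and finally forwards through the queue array.
 */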
2696 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2700 if (cpu < 0 || !pt->queues.nr_queues)
2703 if ((unsigned)cpu >= pt->queues.nr_queues)
2704 i = pt->queues.nr_queues - 1;
2708 if (pt->queues.queue_array[i].cpu == cpu)
2709 return pt->queues.queue_array[i].priv;
2711 for (j = 0; i > 0; j++) {
2712 if (pt->queues.queue_array[--i].cpu == cpu)
2713 return pt->queues.queue_array[i].priv;
2716 for (; j < pt->queues.nr_queues; j++) {
2717 if (pt->queues.queue_array[j].cpu == cpu)
2718 return pt->queues.queue_array[j].priv;
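/*
 * A context switch has been seen on 'cpu' at 'timestamp'.  Record the
 * incoming tid and advance the queue's sync_switch state machine: while
 * tracing, defer the thread change until the switch IP is reached; if the
 * queue was waiting for a switch event, put it back on the decode heap and
 * mark it as tracing again.
 */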
2724 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2727 struct intel_pt_queue *ptq;
2730 if (!pt->sync_switch)
2733 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2734 if (!ptq || !ptq->sync_switch)
2737 switch (ptq->switch_state) {
2738 case INTEL_PT_SS_NOT_TRACING:
2740 case INTEL_PT_SS_UNKNOWN:
2741 case INTEL_PT_SS_TRACING:
2742 ptq->next_tid = tid;
2743 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2745 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2746 if (!ptq->on_heap) {
2747 ptq->timestamp = perf_time_to_tsc(timestamp,
2749 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2753 ptq->on_heap = true;
2755 ptq->switch_state = INTEL_PT_SS_TRACING;
2757 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2758 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
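/*
 * Handle a sched:sched_switch tracepoint sample: take the incoming tid from
 * the "next_pid" field, feed it to the sync_switch state machine, and record
 * it as the current tid for this CPU.
 */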
2769 static int intel_pt_process_switch(struct intel_pt *pt,
2770 struct perf_sample *sample)
2774 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
2776 if (evsel != pt->switch_evsel)
2779 tid = evsel__intval(evsel, sample, "next_pid");
2782 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2783 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2786 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2790 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2793 static int intel_pt_context_switch_in(struct intel_pt *pt,
2794 struct perf_sample *sample)
2796 pid_t pid = sample->pid;
2797 pid_t tid = sample->tid;
2798 int cpu = sample->cpu;
2800 if (pt->sync_switch) {
2801 struct intel_pt_queue *ptq;
2803 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2804 if (ptq && ptq->sync_switch) {
2806 switch (ptq->switch_state) {
2807 case INTEL_PT_SS_NOT_TRACING:
2808 case INTEL_PT_SS_UNKNOWN:
2809 case INTEL_PT_SS_TRACING:
2811 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2812 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2813 ptq->switch_state = INTEL_PT_SS_TRACING;
2822 * If the current tid has not been updated yet, ensure it is now that
2823 * a "switch in" event has occurred.
2825 if (machine__get_current_tid(pt->machine, cpu) == tid)
2828 return machine__set_current_tid(pt->machine, cpu, pid, tid);
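/*
 * Handle PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE.  When only
 * switch-in events are in use (have_sched_switch == 3) the "switch in" path
 * is taken; otherwise a CPU-wide event is required and the next pid/tid it
 * carries drive sync_switch and the current tid for the CPU.
 */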
2831 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2832 struct perf_sample *sample)
2834 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2840 if (pt->have_sched_switch == 3) {
2842 return intel_pt_context_switch_in(pt, sample);
2843 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2844 pr_err("Expecting CPU-wide context switch event\n");
2847 pid = event->context_switch.next_prev_pid;
2848 tid = event->context_switch.next_prev_tid;
2857 intel_pt_log("context_switch event has no tid\n");
2859 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2863 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2866 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2867 union perf_event *event,
2868 struct perf_sample *sample)
2870 if (!pt->per_cpu_mmaps)
2873 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2874 sample->cpu, event->itrace_start.pid,
2875 event->itrace_start.tid, sample->time,
2876 perf_time_to_tsc(sample->time, &pt->tc));
2878 return machine__set_current_tid(pt->machine, sample->cpu,
2879 event->itrace_start.pid,
2880 event->itrace_start.tid);
2883 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
2884 struct addr_location *al)
2886 if (!al->map || addr < al->map->start || addr >= al->map->end) {
2887 if (!thread__find_map(thread, cpumode, addr, al))
2894 /* Invalidate all instruction cache entries that overlap the text poke */
2895 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
2897 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2898 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
2899 /* Assume the text poke begins in a basic block of no more than 4096 bytes */
2900 int cnt = 4096 + event->text_poke.new_len;
2901 struct thread *thread = pt->unknown_thread;
2902 struct addr_location al = { .map = NULL };
2903 struct machine *machine = pt->machine;
2904 struct intel_pt_cache_entry *e;
2907 if (!event->text_poke.new_len)
2910 for (; cnt; cnt--, addr--) {
2911 if (intel_pt_find_map(thread, cpumode, addr, &al)) {
2912 if (addr < event->text_poke.addr)
2917 if (!al.map->dso || !al.map->dso->auxtrace_cache)
2920 offset = al.map->map_ip(al.map, addr);
2922 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
2926 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
2928 * No overlap. Working backwards there cannot be another
2929 * basic block that overlaps the text poke if there is a
2930 * branch instruction before the text poke address.
2932 if (e->branch != INTEL_PT_BR_NO_BRANCH)
2935 intel_pt_cache_invalidate(al.map->dso, machine, offset);
2936 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
2937 al.map->dso->long_name, addr);
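/*
 * Main per-event callback.  Convert the sample time to TSC, advance the
 * decode queues up to that point, then handle the event itself (lost AUX
 * data, sched_switch samples, context switches, itrace start, text pokes)
 * and, for regular samples, optionally attach a synthesized callchain and/or
 * branch stack.
 */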
2944 static int intel_pt_process_event(struct perf_session *session,
2945 union perf_event *event,
2946 struct perf_sample *sample,
2947 struct perf_tool *tool)
2949 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2957 if (!tool->ordered_events) {
2958 pr_err("Intel Processor Trace requires ordered events\n");
2962 if (sample->time && sample->time != (u64)-1)
2963 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2967 if (timestamp || pt->timeless_decoding) {
2968 err = intel_pt_update_queues(pt);
2973 if (pt->timeless_decoding) {
2974 if (pt->sampling_mode) {
2975 if (sample->aux_sample.size)
2976 err = intel_pt_process_timeless_sample(pt,
2978 } else if (event->header.type == PERF_RECORD_EXIT) {
2979 err = intel_pt_process_timeless_queues(pt,
2983 } else if (timestamp) {
2984 if (!pt->first_timestamp)
2985 intel_pt_first_timestamp(pt, timestamp);
2986 err = intel_pt_process_queues(pt, timestamp);
2991 if (event->header.type == PERF_RECORD_SAMPLE) {
2992 if (pt->synth_opts.add_callchain && !sample->callchain)
2993 intel_pt_add_callchain(pt, sample);
2994 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
2995 intel_pt_add_br_stack(pt, sample);
2998 if (event->header.type == PERF_RECORD_AUX &&
2999 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3000 pt->synth_opts.errors) {
3001 err = intel_pt_lost(pt, sample);
3006 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3007 err = intel_pt_process_switch(pt, sample);
3008 else if (event->header.type == PERF_RECORD_ITRACE_START)
3009 err = intel_pt_process_itrace_start(pt, event, sample);
3010 else if (event->header.type == PERF_RECORD_SWITCH ||
3011 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3012 err = intel_pt_context_switch(pt, event, sample);
3014 if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3015 err = intel_pt_text_poke(pt, event);
3017 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3018 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3019 event->header.type, sample->cpu, sample->time, timestamp);
3020 intel_pt_log_event(event);
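/*
 * Flush callback: at the end of the session, decode whatever trace data is
 * still queued (everything, i.e. up to MAX_TIMESTAMP).
 */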
3026 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3028 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3035 if (!tool->ordered_events)
3038 ret = intel_pt_update_queues(pt);
3042 if (pt->timeless_decoding)
3043 return intel_pt_process_timeless_queues(pt, -1,
3046 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3049 static void intel_pt_free_events(struct perf_session *session)
3051 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3053 struct auxtrace_queues *queues = &pt->queues;
3056 for (i = 0; i < queues->nr_queues; i++) {
3057 intel_pt_free_queue(queues->queue_array[i].priv);
3058 queues->queue_array[i].priv = NULL;
3060 intel_pt_log_disable();
3061 auxtrace_queues__free(queues);
3064 static void intel_pt_free(struct perf_session *session)
3066 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3069 auxtrace_heap__free(&pt->heap);
3070 intel_pt_free_events(session);
3071 session->auxtrace = NULL;
3072 intel_pt_free_vmcs_info(pt);
3073 thread__put(pt->unknown_thread);
3074 addr_filters__exit(&pt->filts);
3077 zfree(&pt->time_ranges);
3081 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3082 struct evsel *evsel)
3084 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3087 return evsel->core.attr.type == pt->pmu_type;
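/*
 * Queue PERF_RECORD_AUXTRACE data.  With piped input the trace data follows
 * the event and has already been copied, so there is no file offset to seek
 * to; otherwise the current file position locates the data.  When dumping,
 * fetch the buffer and print its packets.
 */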
3090 static int intel_pt_process_auxtrace_event(struct perf_session *session,
3091 union perf_event *event,
3092 struct perf_tool *tool __maybe_unused)
3094 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3097 if (!pt->data_queued) {
3098 struct auxtrace_buffer *buffer;
3100 int fd = perf_data__fd(session->data);
3103 if (perf_data__is_pipe(session->data)) {
3106 data_offset = lseek(fd, 0, SEEK_CUR);
3107 if (data_offset == -1)
3111 err = auxtrace_queues__add_event(&pt->queues, session, event,
3112 data_offset, &buffer);
3116 /* Dump here, now that we have copied a piped trace out of the pipe */
3118 if (auxtrace_buffer__get_data(buffer, fd)) {
3119 intel_pt_dump_event(pt, buffer->data,
3121 auxtrace_buffer__put_data(buffer);
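/*
 * queue_data callback: add either an AUXTRACE event or an AUX area sample to
 * the queues, using the sample time converted to TSC as the ordering
 * timestamp.
 */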
3129 static int intel_pt_queue_data(struct perf_session *session,
3130 struct perf_sample *sample,
3131 union perf_event *event, u64 data_offset)
3133 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3138 return auxtrace_queues__add_event(&pt->queues, session, event,
3142 if (sample->time && sample->time != (u64)-1)
3143 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3147 return auxtrace_queues__add_sample(&pt->queues, session, sample,
3148 data_offset, timestamp);
3151 struct intel_pt_synth {
3152 struct perf_tool dummy_tool;
3153 struct perf_session *session;
3156 static int intel_pt_event_synth(struct perf_tool *tool,
3157 union perf_event *event,
3158 struct perf_sample *sample __maybe_unused,
3159 struct machine *machine __maybe_unused)
3161 struct intel_pt_synth *intel_pt_synth =
3162 container_of(tool, struct intel_pt_synth, dummy_tool);
3164 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3168 static int intel_pt_synth_event(struct perf_session *session, const char *name,
3169 struct perf_event_attr *attr, u64 id)
3171 struct intel_pt_synth intel_pt_synth;
3174 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3175 name, id, (u64)attr->sample_type);
3177 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3178 intel_pt_synth.session = session;
3180 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3181 &id, intel_pt_event_synth);
3183 pr_err("%s: failed to synthesize '%s' event type\n",
3189 static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3192 struct evsel *evsel;
3194 evlist__for_each_entry(evlist, evsel) {
3195 if (evsel->core.id && evsel->core.id[0] == id) {
3197 zfree(&evsel->name);
3198 evsel->name = strdup(name);
3204 static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3205 struct evlist *evlist)
3207 struct evsel *evsel;
3209 evlist__for_each_entry(evlist, evsel) {
3210 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
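/*
 * Build perf_event_attrs for the sample types requested via --itrace
 * (branches, instructions, transactions, ptwrite, power events, ...) and
 * synthesize an attribute event for each, recording the chosen id and sample
 * type so the decoder can emit matching samples later.
 */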
3217 static int intel_pt_synth_events(struct intel_pt *pt,
3218 struct perf_session *session)
3220 struct evlist *evlist = session->evlist;
3221 struct evsel *evsel = intel_pt_evsel(pt, evlist);
3222 struct perf_event_attr attr;
3227 pr_debug("There are no selected events with Intel Processor Trace data\n");
3231 memset(&attr, 0, sizeof(struct perf_event_attr));
3232 attr.size = sizeof(struct perf_event_attr);
3233 attr.type = PERF_TYPE_HARDWARE;
3234 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3235 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3237 if (pt->timeless_decoding)
3238 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3240 attr.sample_type |= PERF_SAMPLE_TIME;
3241 if (!pt->per_cpu_mmaps)
3242 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3243 attr.exclude_user = evsel->core.attr.exclude_user;
3244 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3245 attr.exclude_hv = evsel->core.attr.exclude_hv;
3246 attr.exclude_host = evsel->core.attr.exclude_host;
3247 attr.exclude_guest = evsel->core.attr.exclude_guest;
3248 attr.sample_id_all = evsel->core.attr.sample_id_all;
3249 attr.read_format = evsel->core.attr.read_format;
3251 id = evsel->core.id[0] + 1000000000;
3255 if (pt->synth_opts.branches) {
3256 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3257 attr.sample_period = 1;
3258 attr.sample_type |= PERF_SAMPLE_ADDR;
3259 err = intel_pt_synth_event(session, "branches", &attr, id);
3262 pt->sample_branches = true;
3263 pt->branches_sample_type = attr.sample_type;
3264 pt->branches_id = id;
3266 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3269 if (pt->synth_opts.callchain)
3270 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3271 if (pt->synth_opts.last_branch) {
3272 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3274 * We don't use the hardware index, but the sample generation
3275 * code uses the new format branch_stack with this field,
3276 * so the event attributes must indicate that it's present.
3278 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3281 if (pt->synth_opts.instructions) {
3282 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3283 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3284 attr.sample_period =
3285 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3287 attr.sample_period = pt->synth_opts.period;
3288 err = intel_pt_synth_event(session, "instructions", &attr, id);
3291 pt->sample_instructions = true;
3292 pt->instructions_sample_type = attr.sample_type;
3293 pt->instructions_id = id;
3297 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3298 attr.sample_period = 1;
3300 if (pt->synth_opts.transactions) {
3301 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3302 err = intel_pt_synth_event(session, "transactions", &attr, id);
3305 pt->sample_transactions = true;
3306 pt->transactions_sample_type = attr.sample_type;
3307 pt->transactions_id = id;
3308 intel_pt_set_event_name(evlist, id, "transactions");
3312 attr.type = PERF_TYPE_SYNTH;
3313 attr.sample_type |= PERF_SAMPLE_RAW;
3315 if (pt->synth_opts.ptwrites) {
3316 attr.config = PERF_SYNTH_INTEL_PTWRITE;
3317 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3320 pt->sample_ptwrites = true;
3321 pt->ptwrites_sample_type = attr.sample_type;
3322 pt->ptwrites_id = id;
3323 intel_pt_set_event_name(evlist, id, "ptwrite");
3327 if (pt->synth_opts.pwr_events) {
3328 pt->sample_pwr_events = true;
3329 pt->pwr_events_sample_type = attr.sample_type;
3331 attr.config = PERF_SYNTH_INTEL_CBR;
3332 err = intel_pt_synth_event(session, "cbr", &attr, id);
3336 intel_pt_set_event_name(evlist, id, "cbr");
3339 attr.config = PERF_SYNTH_INTEL_PSB;
3340 err = intel_pt_synth_event(session, "psb", &attr, id);
3344 intel_pt_set_event_name(evlist, id, "psb");
3348 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
3349 attr.config = PERF_SYNTH_INTEL_MWAIT;
3350 err = intel_pt_synth_event(session, "mwait", &attr, id);
3354 intel_pt_set_event_name(evlist, id, "mwait");
3357 attr.config = PERF_SYNTH_INTEL_PWRE;
3358 err = intel_pt_synth_event(session, "pwre", &attr, id);
3362 intel_pt_set_event_name(evlist, id, "pwre");
3365 attr.config = PERF_SYNTH_INTEL_EXSTOP;
3366 err = intel_pt_synth_event(session, "exstop", &attr, id);
3370 intel_pt_set_event_name(evlist, id, "exstop");
3373 attr.config = PERF_SYNTH_INTEL_PWRX;
3374 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3378 intel_pt_set_event_name(evlist, id, "pwrx");
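/*
 * An evsel with aux_output set had its PEBS records directed into the PT
 * trace; remember it so those records can be synthesized back into samples.
 */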
3385 static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3387 struct evsel *evsel;
3389 if (!pt->synth_opts.other_events)
3392 evlist__for_each_entry(pt->session->evlist, evsel) {
3393 if (evsel->core.attr.aux_output && evsel->core.id) {
3394 pt->sample_pebs = true;
3395 pt->pebs_evsel = evsel;
3401 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3403 struct evsel *evsel;
3405 evlist__for_each_entry_reverse(evlist, evsel) {
3406 const char *name = evsel__name(evsel);
3408 if (!strcmp(name, "sched:sched_switch"))
3415 static bool intel_pt_find_switch(struct evlist *evlist)
3417 struct evsel *evsel;
3419 evlist__for_each_entry(evlist, evsel) {
3420 if (evsel->core.attr.context_switch)
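/*
 * perf config callback.  The only key handled here is intel-pt.mispred-all,
 * e.g.:
 *
 *	[intel-pt]
 *		mispred-all = on
 *
 * which sets pt->mispred_all (report every branch as mispredicted).
 */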
3427 static int intel_pt_perf_config(const char *var, const char *value, void *data)
3429 struct intel_pt *pt = data;
3431 if (!strcmp(var, "intel-pt.mispred-all"))
3432 pt->mispred_all = perf_config_bool(var, value);
3437 /* Find least TSC which converts to ns or later */
3438 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3442 tsc = perf_time_to_tsc(ns, &pt->tc);
3445 tm = tsc_to_perf_time(tsc, &pt->tc);
3452 tm = tsc_to_perf_time(++tsc, &pt->tc);
3457 /* Find greatest TSC which converts to ns or earlier */
3458 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3462 tsc = perf_time_to_tsc(ns, &pt->tc);
3465 tm = tsc_to_perf_time(tsc, &pt->tc);
3472 tm = tsc_to_perf_time(--tsc, &pt->tc);
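/*
 * Convert the --time ranges from perf time (ns) to TSC ranges that the
 * decoder can compare timestamps against.
 */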
3477 static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3478 struct itrace_synth_opts *opts)
3480 struct perf_time_interval *p = opts->ptime_range;
3481 int n = opts->range_num;
3484 if (!n || !p || pt->timeless_decoding)
3487 pt->time_ranges = calloc(n, sizeof(struct range));
3488 if (!pt->time_ranges)
3493 intel_pt_log("%s: %u range(s)\n", __func__, n);
3495 for (i = 0; i < n; i++) {
3496 struct range *r = &pt->time_ranges[i];
3497 u64 ts = p[i].start;
3501 * Take care to ensure the TSC range matches the perf-time range
3502 * when converted back to perf-time.
3504 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3505 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3507 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3509 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3510 i, r->start, r->end);
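/*
 * Parse one VM Time Correlation argument: a TSC offset optionally followed
 * by the VMCS addresses it applies to, e.g. "0x1000:0xabcd,0xef12" (format
 * inferred from the parsing below).  An offset given without VMCS addresses
 * becomes the default TSC offset.
 */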
3516 static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
3518 struct intel_pt_vmcs_info *vmcs_info;
3519 u64 tsc_offset, vmcs;
3528 tsc_offset = strtoull(p, &p, 0);
3533 pt->dflt_tsc_offset = tsc_offset;
3538 vmcs = strtoull(p, &p, 0);
3543 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
3555 static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
3557 char *args = pt->synth_opts.vm_tm_corr_args;
3564 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
3568 pr_err("Failed to parse VM Time Correlation options\n");
3575 static const char * const intel_pt_info_fmts[] = {
3576 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3577 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3578 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3579 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3580 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3581 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3582 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3583 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3584 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3585 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3586 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3587 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3588 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3589 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3590 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3591 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3594 static void intel_pt_print_info(__u64 *arr, int start, int finish)
3601 for (i = start; i <= finish; i++)
3602 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3605 static void intel_pt_print_info_str(const char *name, const char *str)
3610 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
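/* Check the auxtrace_info record is large enough to contain priv[] entry 'pos' */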
3613 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3615 return auxtrace_info->header.size >=
3616 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
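/*
 * Set up Intel PT processing for the session from the
 * PERF_RECORD_AUXTRACE_INFO event: read the recorded parameters, apply
 * --itrace options, create the queues, synthesize the sample event
 * attributes and hook up the auxtrace callbacks.
 */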
3619 int intel_pt_process_auxtrace_info(union perf_event *event,
3620 struct perf_session *session)
3622 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3623 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3624 struct intel_pt *pt;
3629 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3633 pt = zalloc(sizeof(struct intel_pt));
3637 pt->vmcs_info = RB_ROOT;
3639 addr_filters__init(&pt->filts);
3641 err = perf_config(intel_pt_perf_config, pt);
3645 err = auxtrace_queues__init(&pt->queues);
3649 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3651 if (session->itrace_synth_opts->set) {
3652 pt->synth_opts = *session->itrace_synth_opts;
3654 struct itrace_synth_opts *opts = session->itrace_synth_opts;
3656 itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
3657 if (!opts->default_no_sample && !opts->inject) {
3658 pt->synth_opts.branches = false;
3659 pt->synth_opts.callchain = true;
3660 pt->synth_opts.add_callchain = true;
3662 pt->synth_opts.thread_stack = opts->thread_stack;
3665 pt->session = session;
3666 pt->machine = &session->machines.host; /* No kvm support */
3667 pt->auxtrace_type = auxtrace_info->type;
3668 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3669 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3670 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3671 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3672 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3673 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3674 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3675 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3676 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3677 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3678 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3679 INTEL_PT_PER_CPU_MMAPS);
3681 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3682 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3683 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3684 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3685 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3686 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3687 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3691 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3692 pt->max_non_turbo_ratio =
3693 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3694 intel_pt_print_info(&auxtrace_info->priv[0],
3695 INTEL_PT_MAX_NONTURBO_RATIO,
3696 INTEL_PT_MAX_NONTURBO_RATIO);
3699 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3700 info_end = (void *)info + auxtrace_info->header.size;
3702 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3705 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3706 intel_pt_print_info(&auxtrace_info->priv[0],
3707 INTEL_PT_FILTER_STR_LEN,
3708 INTEL_PT_FILTER_STR_LEN);
3710 const char *filter = (const char *)info;
3712 len = roundup(len + 1, 8);
3714 if ((void *)info > info_end) {
3715 pr_err("%s: bad filter string length\n", __func__);
3717 goto err_free_queues;
3719 pt->filter = memdup(filter, len);
3722 goto err_free_queues;
3724 if (session->header.needs_swap)
3725 mem_bswap_64(pt->filter, len);
3726 if (pt->filter[len - 1]) {
3727 pr_err("%s: filter string not null terminated\n", __func__);
3729 goto err_free_queues;
3731 err = addr_filters__parse_bare_filter(&pt->filts,
3734 goto err_free_queues;
3736 intel_pt_print_info_str("Filter string", pt->filter);
3739 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3740 if (pt->timeless_decoding && !pt->tc.time_mult)
3741 pt->tc.time_mult = 1;
3742 pt->have_tsc = intel_pt_have_tsc(pt);
3743 pt->sampling_mode = intel_pt_sampling_mode(pt);
3744 pt->est_tsc = !pt->timeless_decoding;
3746 if (pt->synth_opts.vm_time_correlation) {
3747 if (pt->timeless_decoding) {
3748 pr_err("Intel PT has no time information for VM Time Correlation\n");
3750 goto err_free_queues;
3752 if (session->itrace_synth_opts->ptime_range) {
3753 pr_err("Time ranges cannot be specified with VM Time Correlation\n");
3755 goto err_free_queues;
3757 /* Currently TSC Offset is calculated using MTC packets */
3758 if (!intel_pt_have_mtc(pt)) {
3759 pr_err("MTC packets must have been enabled for VM Time Correlation\n");
3761 goto err_free_queues;
3763 err = intel_pt_parse_vm_tm_corr_args(pt);
3765 goto err_free_queues;
3768 pt->unknown_thread = thread__new(999999999, 999999999);
3769 if (!pt->unknown_thread) {
3771 goto err_free_queues;
3775  * Since this thread will not be kept in any rbtree or list,
3776  * initialize its list node so that at thread__put() the
3777  * current thread lifetime assumption is kept and we don't
3778  * segfault at list_del_init().
3780 INIT_LIST_HEAD(&pt->unknown_thread->node);
3782 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3784 goto err_delete_thread;
3785 if (thread__init_maps(pt->unknown_thread, pt->machine)) {
3787 goto err_delete_thread;
3790 pt->auxtrace.process_event = intel_pt_process_event;
3791 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3792 pt->auxtrace.queue_data = intel_pt_queue_data;
3793 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
3794 pt->auxtrace.flush_events = intel_pt_flush;
3795 pt->auxtrace.free_events = intel_pt_free_events;
3796 pt->auxtrace.free = intel_pt_free;
3797 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
3798 session->auxtrace = &pt->auxtrace;
3803 if (pt->have_sched_switch == 1) {
3804 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3805 if (!pt->switch_evsel) {
3806 pr_err("%s: missing sched_switch event\n", __func__);
3808 goto err_delete_thread;
3810 } else if (pt->have_sched_switch == 2 &&
3811 !intel_pt_find_switch(session->evlist)) {
3812 pr_err("%s: missing context_switch attribute flag\n", __func__);
3814 goto err_delete_thread;
3817 if (pt->synth_opts.log)
3818 intel_pt_log_enable();
3820 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
3821 if (pt->tc.time_mult) {
3822 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3824 if (!pt->max_non_turbo_ratio)
3825 pt->max_non_turbo_ratio =
3826 (tsc_freq + 50000000) / 100000000;
3827 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3828 intel_pt_log("Maximum non-turbo ratio %u\n",
3829 pt->max_non_turbo_ratio);
3830 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
3833 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3835 goto err_delete_thread;
3837 if (pt->synth_opts.calls)
3838 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3839 PERF_IP_FLAG_TRACE_END;
3840 if (pt->synth_opts.returns)
3841 pt->branches_filter |= PERF_IP_FLAG_RETURN |
3842 PERF_IP_FLAG_TRACE_BEGIN;
3844 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
3845 !symbol_conf.use_callchain) {
3846 symbol_conf.use_callchain = true;
3847 if (callchain_register_param(&callchain_param) < 0) {
3848 symbol_conf.use_callchain = false;
3849 pt->synth_opts.callchain = false;
3850 pt->synth_opts.add_callchain = false;
3854 if (pt->synth_opts.add_callchain) {
3855 err = intel_pt_callchain_init(pt);
3857 goto err_delete_thread;
3860 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
3861 pt->br_stack_sz = pt->synth_opts.last_branch_sz;
3862 pt->br_stack_sz_plus = pt->br_stack_sz;
3865 if (pt->synth_opts.add_last_branch) {
3866 err = intel_pt_br_stack_init(pt);
3868 goto err_delete_thread;
3870 * Additional branch stack size to cater for tracing from the
3871 * actual sample ip to where the sample time is recorded.
3872 * Measured at about 200 branches, but generously set to 1024.
3873 * If kernel space is not being traced, then add just 1 for the
3874 * branch to kernel space.
3876 if (intel_pt_tracing_kernel(pt))
3877 pt->br_stack_sz_plus += 1024;
3879 pt->br_stack_sz_plus += 1;
3882 pt->use_thread_stack = pt->synth_opts.callchain ||
3883 pt->synth_opts.add_callchain ||
3884 pt->synth_opts.thread_stack ||
3885 pt->synth_opts.last_branch ||
3886 pt->synth_opts.add_last_branch;
3888 pt->callstack = pt->synth_opts.callchain ||
3889 pt->synth_opts.add_callchain ||
3890 pt->synth_opts.thread_stack;
3892 err = intel_pt_synth_events(pt, session);
3894 goto err_delete_thread;
3896 intel_pt_setup_pebs_events(pt);
3898 if (pt->sampling_mode || list_empty(&session->auxtrace_index))
3899 err = auxtrace_queue_data(session, true, true);
3901 err = auxtrace_queues__process_index(&pt->queues, session);
3903 goto err_delete_thread;
3905 if (pt->queues.populated)
3906 pt->data_queued = true;
3908 if (pt->timeless_decoding)
3909 pr_debug2("Intel PT decoding without timestamps\n");
3915 thread__zput(pt->unknown_thread);
3917 intel_pt_log_disable();
3918 auxtrace_queues__free(&pt->queues);
3919 session->auxtrace = NULL;
3921 addr_filters__exit(&pt->filts);
3923 zfree(&pt->time_ranges);