1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * intel_pt.c: Intel Processor Trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
5 */
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/zalloc.h>
27 #include "thread-stack.h"
29 #include "callchain.h"
36 #include "util/perf_api_probe.h"
37 #include "util/synthetic-events.h"
38 #include "time-utils.h"
40 #include "../arch/x86/include/uapi/asm/perf_regs.h"
42 #include "intel-pt-decoder/intel-pt-log.h"
43 #include "intel-pt-decoder/intel-pt-decoder.h"
44 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
45 #include "intel-pt-decoder/intel-pt-pkt-decoder.h"
47 #define MAX_TIMESTAMP (~0ULL)
55 struct auxtrace auxtrace;
56 struct auxtrace_queues queues;
57 struct auxtrace_heap heap;
59 struct perf_session *session;
60 struct machine *machine;
61 struct evsel *switch_evsel;
62 struct thread *unknown_thread;
63 bool timeless_decoding;
72 bool use_thread_stack;
74 unsigned int br_stack_sz;
75 unsigned int br_stack_sz_plus;
76 int have_sched_switch;
82 struct perf_tsc_conversion tc;
83 bool cap_user_time_zero;
85 struct itrace_synth_opts synth_opts;
87 bool sample_instructions;
88 u64 instructions_sample_type;
93 u64 branches_sample_type;
96 bool sample_transactions;
97 u64 transactions_sample_type;
100 bool sample_ptwrites;
101 u64 ptwrites_sample_type;
104 bool sample_pwr_events;
105 u64 pwr_events_sample_type;
114 struct evsel *pebs_evsel;
123 unsigned max_non_turbo_ratio;
126 unsigned long num_events;
129 struct addr_filters filts;
131 struct range *time_ranges;
132 unsigned int range_cnt;
134 struct ip_callchain *chain;
135 struct branch_stack *br_stack;
139 INTEL_PT_SS_NOT_TRACING,
142 INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
143 INTEL_PT_SS_EXPECTING_SWITCH_IP,
146 struct intel_pt_queue {
148 unsigned int queue_nr;
149 struct auxtrace_buffer *buffer;
150 struct auxtrace_buffer *old_buffer;
152 const struct intel_pt_state *state;
153 struct ip_callchain *chain;
154 struct branch_stack *last_branch;
155 union perf_event *event_buf;
158 bool step_through_buffers;
159 bool use_buffer_pid_tid;
165 struct thread *thread;
166 struct machine *guest_machine;
167 struct thread *unknown_guest_thread;
168 pid_t guest_machine_pid;
175 unsigned int sel_idx;
181 u64 last_in_insn_cnt;
183 u64 last_br_insn_cnt;
185 unsigned int cbr_seen;
186 char insn[INTEL_PT_INSN_BUF_SZ];
189 static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
190 unsigned char *buf, size_t len)
192 struct intel_pt_pkt packet;
195 char desc[INTEL_PT_PKT_DESC_MAX];
196 const char *color = PERF_COLOR_BLUE;
197 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
199 color_fprintf(stdout, color,
200 ". ... Intel Processor Trace data: size %zu bytes\n",
204 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
210 color_fprintf(stdout, color, " %08x: ", pos);
211 for (i = 0; i < pkt_len; i++)
212 color_fprintf(stdout, color, " %02x", buf[i]);
214 color_fprintf(stdout, color, " ");
216 ret = intel_pt_pkt_desc(&packet, desc,
217 INTEL_PT_PKT_DESC_MAX);
219 color_fprintf(stdout, color, " %s\n", desc);
221 color_fprintf(stdout, color, " Bad packet!\n");
229 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
233 intel_pt_dump(pt, buf, len);
236 static void intel_pt_log_event(union perf_event *event)
238 FILE *f = intel_pt_log_fp();
240 if (!intel_pt_enable_logging || !f)
243 perf_event__fprintf(event, NULL, f);
246 static void intel_pt_dump_sample(struct perf_session *session,
247 struct perf_sample *sample)
249 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
253 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
256 static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
258 struct perf_time_interval *range = pt->synth_opts.ptime_range;
259 int n = pt->synth_opts.range_num;
261 if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
264 if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
267 /* perf_time__ranges_skip_sample does not work if time is zero */
271 return !n || !perf_time__ranges_skip_sample(range, n, tm);
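/*
 * In snapshot or sampling mode, consecutive AUX buffers can contain
 * overlapping trace data. Find where buffer 'b' stops overlapping buffer 'a'
 * and trim 'b' (via use_size/use_data) to start from there, noting whether
 * the two buffers turn out to be consecutive.
 */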
274 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
275 struct auxtrace_buffer *b)
277 bool consecutive = false;
280 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
281 pt->have_tsc, &consecutive);
284 b->use_size = b->data + b->size - start;
286 if (b->use_size && consecutive)
287 b->consecutive = true;
291 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
292 struct auxtrace_buffer *buffer,
293 struct auxtrace_buffer *old_buffer,
294 struct intel_pt_buffer *b)
299 int fd = perf_data__fd(ptq->pt->session->data);
301 buffer->data = auxtrace_buffer__get_data(buffer, fd);
306 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
307 if (might_overlap && !buffer->consecutive && old_buffer &&
308 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
311 if (buffer->use_data) {
312 b->len = buffer->use_size;
313 b->buf = buffer->use_data;
315 b->len = buffer->size;
316 b->buf = buffer->data;
318 b->ref_timestamp = buffer->reference;
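/*
 * If there is no previous buffer, or the data might not follow on directly
 * from it, tell the decoder this is a new trace segment: clear 'consecutive'
 * and bump 'trace_nr' so decoding state is not carried across the
 * discontinuity.
 */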
320 if (!old_buffer || (might_overlap && !buffer->consecutive)) {
321 b->consecutive = false;
322 b->trace_nr = buffer->buffer_nr + 1;
324 b->consecutive = true;
330 /* Do not drop buffers with references - refer intel_pt_get_trace() */
331 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
332 struct auxtrace_buffer *buffer)
334 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
337 auxtrace_buffer__drop_data(buffer);
340 /* Must be serialized with respect to intel_pt_get_trace() */
341 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
344 struct intel_pt_queue *ptq = data;
345 struct auxtrace_buffer *buffer = ptq->buffer;
346 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
347 struct auxtrace_queue *queue;
350 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
353 struct intel_pt_buffer b = { .len = 0 };
355 buffer = auxtrace_buffer__next(queue, buffer);
359 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
364 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
367 intel_pt_lookahead_drop_buffer(ptq, buffer);
371 err = cb(&b, cb_data);
376 if (buffer != old_buffer)
377 intel_pt_lookahead_drop_buffer(ptq, buffer);
378 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
384 * This function assumes data is processed sequentially only.
385 * Must be serialized with respect to intel_pt_lookahead()
387 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
389 struct intel_pt_queue *ptq = data;
390 struct auxtrace_buffer *buffer = ptq->buffer;
391 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
392 struct auxtrace_queue *queue;
400 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
402 buffer = auxtrace_buffer__next(queue, buffer);
405 auxtrace_buffer__drop_data(old_buffer);
410 ptq->buffer = buffer;
412 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
416 if (ptq->step_through_buffers)
421 auxtrace_buffer__drop_data(old_buffer);
422 ptq->old_buffer = buffer;
424 auxtrace_buffer__drop_data(buffer);
425 return intel_pt_get_trace(b, data);
431 struct intel_pt_cache_entry {
432 struct auxtrace_cache_entry entry;
435 enum intel_pt_insn_op op;
436 enum intel_pt_insn_branch branch;
439 char insn[INTEL_PT_INSN_BUF_SZ];
442 static int intel_pt_config_div(const char *var, const char *value, void *data)
447 if (!strcmp(var, "intel-pt.cache-divisor")) {
448 val = strtol(value, NULL, 0);
449 if (val > 0 && val <= INT_MAX)
456 static int intel_pt_cache_divisor(void)
463 perf_config(intel_pt_config_div, &d);
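/*
 * Size the per-DSO auxtrace instruction cache: take the DSO data size, divide
 * by the configurable divisor, clamp the result, and convert it to a
 * power-of-two bucket count expressed in bits (32 - clz).
 */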
471 static unsigned int intel_pt_cache_size(struct dso *dso,
472 struct machine *machine)
476 size = dso__data_size(dso, machine);
477 size /= intel_pt_cache_divisor();
480 if (size > (1 << 21))
482 return 32 - __builtin_clz(size);
485 static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
486 struct machine *machine)
488 struct auxtrace_cache *c;
491 if (dso->auxtrace_cache)
492 return dso->auxtrace_cache;
494 bits = intel_pt_cache_size(dso, machine);
496 /* Ignoring cache creation failure */
497 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
499 dso->auxtrace_cache = c;
504 static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
505 u64 offset, u64 insn_cnt, u64 byte_cnt,
506 struct intel_pt_insn *intel_pt_insn)
508 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
509 struct intel_pt_cache_entry *e;
515 e = auxtrace_cache__alloc_entry(c);
519 e->insn_cnt = insn_cnt;
520 e->byte_cnt = byte_cnt;
521 e->op = intel_pt_insn->op;
522 e->branch = intel_pt_insn->branch;
523 e->length = intel_pt_insn->length;
524 e->rel = intel_pt_insn->rel;
525 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
527 err = auxtrace_cache__add(c, offset, &e->entry);
529 auxtrace_cache__free_entry(c, e);
534 static struct intel_pt_cache_entry *
535 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
537 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
542 return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
545 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
548 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
553 auxtrace_cache__remove(dso->auxtrace_cache, offset);
556 static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
558 /* Assumes 64-bit kernel */
559 return ip & (1ULL << 63);
562 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
565 return intel_pt_guest_kernel_ip(ip) ?
566 PERF_RECORD_MISC_GUEST_KERNEL :
567 PERF_RECORD_MISC_GUEST_USER;
570 return ip >= ptq->pt->kernel_start ?
571 PERF_RECORD_MISC_KERNEL :
572 PERF_RECORD_MISC_USER;
575 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
577 /* No support for non-zero CS base */
579 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
580 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
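/*
 * Look up (or reuse) the guest machine for the current pid, falling back to
 * DEFAULT_GUEST_KERNEL_ID when the pid is unknown, and cache the machine's
 * idle thread so guest kernel addresses can be resolved later.
 */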
583 static int intel_pt_get_guest(struct intel_pt_queue *ptq)
585 struct machines *machines = &ptq->pt->session->machines;
586 struct machine *machine;
587 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
589 if (ptq->guest_machine && pid == ptq->guest_machine_pid)
592 ptq->guest_machine = NULL;
593 thread__zput(ptq->unknown_guest_thread);
595 machine = machines__find_guest(machines, pid);
599 ptq->unknown_guest_thread = machine__idle_thread(machine);
600 if (!ptq->unknown_guest_thread)
603 ptq->guest_machine = machine;
604 ptq->guest_machine_pid = pid;
609 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
610 uint64_t *insn_cnt_ptr, uint64_t *ip,
611 uint64_t to_ip, uint64_t max_insn_cnt,
614 struct intel_pt_queue *ptq = data;
615 struct machine *machine = ptq->pt->machine;
616 struct thread *thread;
617 struct addr_location al;
618 unsigned char buf[INTEL_PT_INSN_BUF_SZ];
622 u64 offset, start_offset, start_ip;
627 intel_pt_insn->length = 0;
629 if (to_ip && *ip == to_ip)
632 nr = ptq->state->to_nr;
633 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
636 if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
637 intel_pt_get_guest(ptq))
639 machine = ptq->guest_machine;
640 thread = ptq->unknown_guest_thread;
642 thread = ptq->thread;
644 if (cpumode != PERF_RECORD_MISC_KERNEL)
646 thread = ptq->pt->unknown_thread;
651 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
654 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
655 dso__data_status_seen(al.map->dso,
656 DSO_DATA_STATUS_SEEN_ITRACE))
659 offset = al.map->map_ip(al.map, *ip);
661 if (!to_ip && one_map) {
662 struct intel_pt_cache_entry *e;
664 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
666 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
667 *insn_cnt_ptr = e->insn_cnt;
669 intel_pt_insn->op = e->op;
670 intel_pt_insn->branch = e->branch;
671 intel_pt_insn->length = e->length;
672 intel_pt_insn->rel = e->rel;
673 memcpy(intel_pt_insn->buf, e->insn,
674 INTEL_PT_INSN_BUF_SZ);
675 intel_pt_log_insn_no_data(intel_pt_insn, *ip);
680 start_offset = offset;
683 /* Load maps to ensure dso->is_64_bit has been updated */
686 x86_64 = al.map->dso->is_64_bit;
689 len = dso__data_read_offset(al.map->dso, machine,
691 INTEL_PT_INSN_BUF_SZ);
695 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
698 intel_pt_log_insn(intel_pt_insn, *ip);
702 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
705 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
708 *ip += intel_pt_insn->length;
710 if (to_ip && *ip == to_ip) {
711 intel_pt_insn->length = 0;
715 if (*ip >= al.map->end)
718 offset += intel_pt_insn->length;
723 *insn_cnt_ptr = insn_cnt;
729 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
730 * cache entries.
733 struct intel_pt_cache_entry *e;
735 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
740 /* Ignore cache errors */
741 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
742 *ip - start_ip, intel_pt_insn);
747 *insn_cnt_ptr = insn_cnt;
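/*
 * Address filtering: check a TIP.PGD packet's IP against the configured
 * filter regions. Returns true if the IP hit a trace-stop region, or if
 * filter regions exist but the IP hit none of them.
 */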
751 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
752 uint64_t offset, const char *filename)
754 struct addr_filter *filt;
755 bool have_filter = false;
756 bool hit_tracestop = false;
757 bool hit_filter = false;
759 list_for_each_entry(filt, &pt->filts.head, list) {
763 if ((filename && !filt->filename) ||
764 (!filename && filt->filename) ||
765 (filename && strcmp(filename, filt->filename)))
768 if (!(offset >= filt->addr && offset < filt->addr + filt->size))
771 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
772 ip, offset, filename ? filename : "[kernel]",
773 filt->start ? "filter" : "stop",
774 filt->addr, filt->size);
779 hit_tracestop = true;
782 if (!hit_tracestop && !hit_filter)
783 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
784 ip, offset, filename ? filename : "[kernel]");
786 return hit_tracestop || (have_filter && !hit_filter);
789 static int __intel_pt_pgd_ip(uint64_t ip, void *data)
791 struct intel_pt_queue *ptq = data;
792 struct thread *thread;
793 struct addr_location al;
797 if (ptq->state->to_nr) {
798 if (intel_pt_guest_kernel_ip(ip))
799 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
800 /* No support for decoding guest user space */
802 } else if (ip >= ptq->pt->kernel_start) {
803 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
806 cpumode = PERF_RECORD_MISC_USER;
808 thread = ptq->thread;
812 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
815 offset = al.map->map_ip(al.map, ip);
817 return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
818 al.map->dso->long_name);
821 static bool intel_pt_pgd_ip(uint64_t ip, void *data)
823 return __intel_pt_pgd_ip(ip, data) > 0;
826 static bool intel_pt_get_config(struct intel_pt *pt,
827 struct perf_event_attr *attr, u64 *config)
829 if (attr->type == pt->pmu_type) {
831 *config = attr->config;
838 static bool intel_pt_exclude_kernel(struct intel_pt *pt)
842 evlist__for_each_entry(pt->session->evlist, evsel) {
843 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
844 !evsel->core.attr.exclude_kernel)
850 static bool intel_pt_return_compression(struct intel_pt *pt)
855 if (!pt->noretcomp_bit)
858 evlist__for_each_entry(pt->session->evlist, evsel) {
859 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
860 (config & pt->noretcomp_bit))
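/*
 * Per the intel_pt PMU config layout, bit 0 is the "pass-through" bit and
 * bit 13 (0x2000) is BranchEn; branch tracing is considered disabled only
 * when pass-through is set and BranchEn is clear.
 */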
866 static bool intel_pt_branch_enable(struct intel_pt *pt)
871 evlist__for_each_entry(pt->session->evlist, evsel) {
872 if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
873 (config & 1) && !(config & 0x2000))
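/*
 * Extract the MTC period from the event config: find the bit position of the
 * mtc_period field (the lowest set bit of mtc_freq_bits) and shift the config
 * value down to recover the field's value.
 */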
879 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
885 if (!pt->mtc_freq_bits)
888 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
891 evlist__for_each_entry(pt->session->evlist, evsel) {
892 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
893 return (config & pt->mtc_freq_bits) >> shift;
898 static bool intel_pt_timeless_decoding(struct intel_pt *pt)
901 bool timeless_decoding = true;
904 if (!pt->tsc_bit || !pt->cap_user_time_zero)
907 evlist__for_each_entry(pt->session->evlist, evsel) {
908 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
910 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
911 if (config & pt->tsc_bit)
912 timeless_decoding = false;
917 return timeless_decoding;
920 static bool intel_pt_tracing_kernel(struct intel_pt *pt)
924 evlist__for_each_entry(pt->session->evlist, evsel) {
925 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
926 !evsel->core.attr.exclude_kernel)
932 static bool intel_pt_have_tsc(struct intel_pt *pt)
935 bool have_tsc = false;
941 evlist__for_each_entry(pt->session->evlist, evsel) {
942 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
943 if (config & pt->tsc_bit)
952 static bool intel_pt_sampling_mode(struct intel_pt *pt)
956 evlist__for_each_entry(pt->session->evlist, evsel) {
957 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
958 evsel->core.attr.aux_sample_size)
964 static u64 intel_pt_ctl(struct intel_pt *pt)
969 evlist__for_each_entry(pt->session->evlist, evsel) {
970 if (intel_pt_get_config(pt, &evsel->core.attr, &config))
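/*
 * Convert nanoseconds to TSC ticks by inverting the usual
 * ns = (ticks * time_mult) >> time_shift conversion, i.e.
 * ticks ~= (ns << time_shift) / time_mult, computed as a quotient and
 * remainder of time_mult to reduce the risk of 64-bit overflow.
 */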
976 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
980 quot = ns / pt->tc.time_mult;
981 rem = ns % pt->tc.time_mult;
982 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
986 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
988 size_t sz = sizeof(struct ip_callchain);
990 /* Add 1 to callchain_sz for callchain context */
991 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
995 static int intel_pt_callchain_init(struct intel_pt *pt)
999 evlist__for_each_entry(pt->session->evlist, evsel) {
1000 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
1001 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
1004 pt->chain = intel_pt_alloc_chain(pt);
1011 static void intel_pt_add_callchain(struct intel_pt *pt,
1012 struct perf_sample *sample)
1014 struct thread *thread = machine__findnew_thread(pt->machine,
1018 thread_stack__sample_late(thread, sample->cpu, pt->chain,
1019 pt->synth_opts.callchain_sz + 1, sample->ip,
1022 sample->callchain = pt->chain;
1025 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
1027 size_t sz = sizeof(struct branch_stack);
1029 sz += entry_cnt * sizeof(struct branch_entry);
1033 static int intel_pt_br_stack_init(struct intel_pt *pt)
1035 struct evsel *evsel;
1037 evlist__for_each_entry(pt->session->evlist, evsel) {
1038 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
1039 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
1042 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
1049 static void intel_pt_add_br_stack(struct intel_pt *pt,
1050 struct perf_sample *sample)
1052 struct thread *thread = machine__findnew_thread(pt->machine,
1056 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
1057 pt->br_stack_sz, sample->ip,
1060 sample->branch_stack = pt->br_stack;
1063 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1064 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
1066 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
1067 unsigned int queue_nr)
1069 struct intel_pt_params params = { .get_trace = 0, };
1070 struct perf_env *env = pt->machine->env;
1071 struct intel_pt_queue *ptq;
1073 ptq = zalloc(sizeof(struct intel_pt_queue));
1077 if (pt->synth_opts.callchain) {
1078 ptq->chain = intel_pt_alloc_chain(pt);
1083 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
1084 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
1086 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1087 if (!ptq->last_branch)
1091 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1092 if (!ptq->event_buf)
1096 ptq->queue_nr = queue_nr;
1097 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1103 params.get_trace = intel_pt_get_trace;
1104 params.walk_insn = intel_pt_walk_next_insn;
1105 params.lookahead = intel_pt_lookahead;
1107 params.return_compression = intel_pt_return_compression(pt);
1108 params.branch_enable = intel_pt_branch_enable(pt);
1109 params.ctl = intel_pt_ctl(pt);
1110 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
1111 params.mtc_period = intel_pt_mtc_period(pt);
1112 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1113 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
1114 params.quick = pt->synth_opts.quick;
1116 if (pt->filts.cnt > 0)
1117 params.pgd_ip = intel_pt_pgd_ip;
1119 if (pt->synth_opts.instructions) {
1120 if (pt->synth_opts.period) {
1121 switch (pt->synth_opts.period_type) {
1122 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1123 params.period_type =
1124 INTEL_PT_PERIOD_INSTRUCTIONS;
1125 params.period = pt->synth_opts.period;
1127 case PERF_ITRACE_PERIOD_TICKS:
1128 params.period_type = INTEL_PT_PERIOD_TICKS;
1129 params.period = pt->synth_opts.period;
1131 case PERF_ITRACE_PERIOD_NANOSECS:
1132 params.period_type = INTEL_PT_PERIOD_TICKS;
1133 params.period = intel_pt_ns_to_ticks(pt,
1134 pt->synth_opts.period);
1141 if (!params.period) {
1142 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
1147 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1148 params.flags |= INTEL_PT_FUP_WITH_NLIP;
1150 ptq->decoder = intel_pt_decoder_new(¶ms);
1157 zfree(&ptq->event_buf);
1158 zfree(&ptq->last_branch);
1164 static void intel_pt_free_queue(void *priv)
1166 struct intel_pt_queue *ptq = priv;
1170 thread__zput(ptq->thread);
1171 thread__zput(ptq->unknown_guest_thread);
1172 intel_pt_decoder_free(ptq->decoder);
1173 zfree(&ptq->event_buf);
1174 zfree(&ptq->last_branch);
1179 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1180 struct auxtrace_queue *queue)
1182 struct intel_pt_queue *ptq = queue->priv;
1184 if (queue->tid == -1 || pt->have_sched_switch) {
1185 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1188 thread__zput(ptq->thread);
1191 if (!ptq->thread && ptq->tid != -1)
1192 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1195 ptq->pid = ptq->thread->pid_;
1196 if (queue->cpu == -1)
1197 ptq->cpu = ptq->thread->cpu;
1201 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1204 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1205 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1206 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1207 if (!ptq->state->to_ip)
1208 ptq->flags = PERF_IP_FLAG_BRANCH |
1209 PERF_IP_FLAG_TRACE_END;
1210 else if (ptq->state->from_nr && !ptq->state->to_nr)
1211 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1212 PERF_IP_FLAG_VMEXIT;
1214 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1215 PERF_IP_FLAG_ASYNC |
1216 PERF_IP_FLAG_INTERRUPT;
1218 if (ptq->state->from_ip)
1219 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1221 ptq->flags = PERF_IP_FLAG_BRANCH |
1222 PERF_IP_FLAG_TRACE_BEGIN;
1223 if (ptq->state->flags & INTEL_PT_IN_TX)
1224 ptq->flags |= PERF_IP_FLAG_IN_TX;
1225 ptq->insn_len = ptq->state->insn_len;
1226 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1229 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1230 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1231 if (ptq->state->type & INTEL_PT_TRACE_END)
1232 ptq->flags |= PERF_IP_FLAG_TRACE_END;
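/*
 * Seed the queue's time-range selection: start from the first range's start
 * time or, if the range has no start (it begins at the start of the trace),
 * from its end time with sel_start cleared.
 */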
1235 static void intel_pt_setup_time_range(struct intel_pt *pt,
1236 struct intel_pt_queue *ptq)
1241 ptq->sel_timestamp = pt->time_ranges[0].start;
1244 if (ptq->sel_timestamp) {
1245 ptq->sel_start = true;
1247 ptq->sel_timestamp = pt->time_ranges[0].end;
1248 ptq->sel_start = false;
1252 static int intel_pt_setup_queue(struct intel_pt *pt,
1253 struct auxtrace_queue *queue,
1254 unsigned int queue_nr)
1256 struct intel_pt_queue *ptq = queue->priv;
1258 if (list_empty(&queue->head))
1262 ptq = intel_pt_alloc_queue(pt, queue_nr);
1267 if (queue->cpu != -1)
1268 ptq->cpu = queue->cpu;
1269 ptq->tid = queue->tid;
1271 ptq->cbr_seen = UINT_MAX;
1273 if (pt->sampling_mode && !pt->snapshot_mode &&
1274 pt->timeless_decoding)
1275 ptq->step_through_buffers = true;
1277 ptq->sync_switch = pt->sync_switch;
1279 intel_pt_setup_time_range(pt, ptq);
1282 if (!ptq->on_heap &&
1283 (!ptq->sync_switch ||
1284 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1285 const struct intel_pt_state *state;
1288 if (pt->timeless_decoding)
1291 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1292 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1293 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1295 if (ptq->sel_start && ptq->sel_timestamp) {
1296 ret = intel_pt_fast_forward(ptq->decoder,
1297 ptq->sel_timestamp);
1303 state = intel_pt_decode(ptq->decoder);
1305 if (state->err == INTEL_PT_ERR_NODATA) {
1306 intel_pt_log("queue %u has no timestamp\n",
1312 if (state->timestamp)
1316 ptq->timestamp = state->timestamp;
1317 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1318 queue_nr, ptq->timestamp);
1320 ptq->have_sample = true;
1321 if (ptq->sel_start && ptq->sel_timestamp &&
1322 ptq->timestamp < ptq->sel_timestamp)
1323 ptq->have_sample = false;
1324 intel_pt_sample_flags(ptq);
1325 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1328 ptq->on_heap = true;
1334 static int intel_pt_setup_queues(struct intel_pt *pt)
1339 for (i = 0; i < pt->queues.nr_queues; i++) {
1340 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1347 static inline bool intel_pt_skip_event(struct intel_pt *pt)
1349 return pt->synth_opts.initial_skip &&
1350 pt->num_events++ < pt->synth_opts.initial_skip;
1354 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1355 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
1356 * from this decoder state.
1358 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1360 return pt->synth_opts.initial_skip &&
1361 pt->num_events + 4 < pt->synth_opts.initial_skip;
1364 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1365 union perf_event *event,
1366 struct perf_sample *sample)
1368 event->sample.header.type = PERF_RECORD_SAMPLE;
1369 event->sample.header.size = sizeof(struct perf_event_header);
1371 sample->pid = ptq->pid;
1372 sample->tid = ptq->tid;
1373 sample->cpu = ptq->cpu;
1374 sample->insn_len = ptq->insn_len;
1375 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1378 static void intel_pt_prep_b_sample(struct intel_pt *pt,
1379 struct intel_pt_queue *ptq,
1380 union perf_event *event,
1381 struct perf_sample *sample)
1383 intel_pt_prep_a_sample(ptq, event, sample);
1385 if (!pt->timeless_decoding)
1386 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1388 sample->ip = ptq->state->from_ip;
1389 sample->addr = ptq->state->to_ip;
1390 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1392 sample->flags = ptq->flags;
1394 event->sample.header.misc = sample->cpumode;
1397 static int intel_pt_inject_event(union perf_event *event,
1398 struct perf_sample *sample, u64 type)
1400 event->header.size = perf_event__sample_event_size(sample, type, 0);
1401 return perf_event__synthesize_sample(event, type, 0, sample);
1404 static inline int intel_pt_opt_inject(struct intel_pt *pt,
1405 union perf_event *event,
1406 struct perf_sample *sample, u64 type)
1408 if (!pt->synth_opts.inject)
1411 return intel_pt_inject_event(event, sample, type);
1414 static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1415 union perf_event *event,
1416 struct perf_sample *sample, u64 type)
1420 ret = intel_pt_opt_inject(pt, event, sample, type);
1424 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1426 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1431 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1433 struct intel_pt *pt = ptq->pt;
1434 union perf_event *event = ptq->event_buf;
1435 struct perf_sample sample = { .ip = 0, };
1436 struct dummy_branch_stack {
1439 struct branch_entry entries;
1442 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1445 if (intel_pt_skip_event(pt))
1448 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1450 sample.id = ptq->pt->branches_id;
1451 sample.stream_id = ptq->pt->branches_id;
1454 * perf report cannot handle events without a branch stack when using
1455 * SORT_MODE__BRANCH so make a dummy one.
1457 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1458 dummy_bs = (struct dummy_branch_stack){
1466 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1469 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
1470 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1471 if (sample.cyc_cnt) {
1472 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1473 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1474 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1477 return intel_pt_deliver_synth_event(pt, event, &sample,
1478 pt->branches_sample_type);
1481 static void intel_pt_prep_sample(struct intel_pt *pt,
1482 struct intel_pt_queue *ptq,
1483 union perf_event *event,
1484 struct perf_sample *sample)
1486 intel_pt_prep_b_sample(pt, ptq, event, sample);
1488 if (pt->synth_opts.callchain) {
1489 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1490 pt->synth_opts.callchain_sz + 1,
1491 sample->ip, pt->kernel_start);
1492 sample->callchain = ptq->chain;
1495 if (pt->synth_opts.last_branch) {
1496 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1498 sample->branch_stack = ptq->last_branch;
1502 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1504 struct intel_pt *pt = ptq->pt;
1505 union perf_event *event = ptq->event_buf;
1506 struct perf_sample sample = { .ip = 0, };
1508 if (intel_pt_skip_event(pt))
1511 intel_pt_prep_sample(pt, ptq, event, &sample);
1513 sample.id = ptq->pt->instructions_id;
1514 sample.stream_id = ptq->pt->instructions_id;
1515 if (pt->synth_opts.quick)
1518 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1520 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
1521 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1522 if (sample.cyc_cnt) {
1523 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1524 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1525 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1528 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1530 return intel_pt_deliver_synth_event(pt, event, &sample,
1531 pt->instructions_sample_type);
1534 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1536 struct intel_pt *pt = ptq->pt;
1537 union perf_event *event = ptq->event_buf;
1538 struct perf_sample sample = { .ip = 0, };
1540 if (intel_pt_skip_event(pt))
1543 intel_pt_prep_sample(pt, ptq, event, &sample);
1545 sample.id = ptq->pt->transactions_id;
1546 sample.stream_id = ptq->pt->transactions_id;
1548 return intel_pt_deliver_synth_event(pt, event, &sample,
1549 pt->transactions_sample_type);
1552 static void intel_pt_prep_p_sample(struct intel_pt *pt,
1553 struct intel_pt_queue *ptq,
1554 union perf_event *event,
1555 struct perf_sample *sample)
1557 intel_pt_prep_sample(pt, ptq, event, sample);
1560 * Zero IP is used to mean "trace start" but that is not the case for
1561 * power or PTWRITE events with no IP, so clear the flags.
1567 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1569 struct intel_pt *pt = ptq->pt;
1570 union perf_event *event = ptq->event_buf;
1571 struct perf_sample sample = { .ip = 0, };
1572 struct perf_synth_intel_ptwrite raw;
1574 if (intel_pt_skip_event(pt))
1577 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1579 sample.id = ptq->pt->ptwrites_id;
1580 sample.stream_id = ptq->pt->ptwrites_id;
1583 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1584 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1586 sample.raw_size = perf_synth__raw_size(raw);
1587 sample.raw_data = perf_synth__raw_data(&raw);
1589 return intel_pt_deliver_synth_event(pt, event, &sample,
1590 pt->ptwrites_sample_type);
1593 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1595 struct intel_pt *pt = ptq->pt;
1596 union perf_event *event = ptq->event_buf;
1597 struct perf_sample sample = { .ip = 0, };
1598 struct perf_synth_intel_cbr raw;
1601 if (intel_pt_skip_cbr_event(pt))
1604 ptq->cbr_seen = ptq->state->cbr;
1606 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1608 sample.id = ptq->pt->cbr_id;
1609 sample.stream_id = ptq->pt->cbr_id;
1611 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1612 raw.flags = cpu_to_le32(flags);
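/*
 * cbr2khz (set up during auxtrace init, not shown in this excerpt) scales the
 * core-to-bus ratio from the CBR payload to an effective frequency in kHz.
 */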
1613 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1616 sample.raw_size = perf_synth__raw_size(raw);
1617 sample.raw_data = perf_synth__raw_data(&raw);
1619 return intel_pt_deliver_synth_event(pt, event, &sample,
1620 pt->pwr_events_sample_type);
1623 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1625 struct intel_pt *pt = ptq->pt;
1626 union perf_event *event = ptq->event_buf;
1627 struct perf_sample sample = { .ip = 0, };
1628 struct perf_synth_intel_psb raw;
1630 if (intel_pt_skip_event(pt))
1633 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1635 sample.id = ptq->pt->psb_id;
1636 sample.stream_id = ptq->pt->psb_id;
1640 raw.offset = ptq->state->psb_offset;
1642 sample.raw_size = perf_synth__raw_size(raw);
1643 sample.raw_data = perf_synth__raw_data(&raw);
1645 return intel_pt_deliver_synth_event(pt, event, &sample,
1646 pt->pwr_events_sample_type);
1649 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1651 struct intel_pt *pt = ptq->pt;
1652 union perf_event *event = ptq->event_buf;
1653 struct perf_sample sample = { .ip = 0, };
1654 struct perf_synth_intel_mwait raw;
1656 if (intel_pt_skip_event(pt))
1659 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1661 sample.id = ptq->pt->mwait_id;
1662 sample.stream_id = ptq->pt->mwait_id;
1665 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1667 sample.raw_size = perf_synth__raw_size(raw);
1668 sample.raw_data = perf_synth__raw_data(&raw);
1670 return intel_pt_deliver_synth_event(pt, event, &sample,
1671 pt->pwr_events_sample_type);
1674 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1676 struct intel_pt *pt = ptq->pt;
1677 union perf_event *event = ptq->event_buf;
1678 struct perf_sample sample = { .ip = 0, };
1679 struct perf_synth_intel_pwre raw;
1681 if (intel_pt_skip_event(pt))
1684 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1686 sample.id = ptq->pt->pwre_id;
1687 sample.stream_id = ptq->pt->pwre_id;
1690 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1692 sample.raw_size = perf_synth__raw_size(raw);
1693 sample.raw_data = perf_synth__raw_data(&raw);
1695 return intel_pt_deliver_synth_event(pt, event, &sample,
1696 pt->pwr_events_sample_type);
1699 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1701 struct intel_pt *pt = ptq->pt;
1702 union perf_event *event = ptq->event_buf;
1703 struct perf_sample sample = { .ip = 0, };
1704 struct perf_synth_intel_exstop raw;
1706 if (intel_pt_skip_event(pt))
1709 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1711 sample.id = ptq->pt->exstop_id;
1712 sample.stream_id = ptq->pt->exstop_id;
1715 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1717 sample.raw_size = perf_synth__raw_size(raw);
1718 sample.raw_data = perf_synth__raw_data(&raw);
1720 return intel_pt_deliver_synth_event(pt, event, &sample,
1721 pt->pwr_events_sample_type);
1724 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1726 struct intel_pt *pt = ptq->pt;
1727 union perf_event *event = ptq->event_buf;
1728 struct perf_sample sample = { .ip = 0, };
1729 struct perf_synth_intel_pwrx raw;
1731 if (intel_pt_skip_event(pt))
1734 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1736 sample.id = ptq->pt->pwrx_id;
1737 sample.stream_id = ptq->pt->pwrx_id;
1740 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1742 sample.raw_size = perf_synth__raw_size(raw);
1743 sample.raw_data = perf_synth__raw_data(&raw);
1745 return intel_pt_deliver_synth_event(pt, event, &sample,
1746 pt->pwr_events_sample_type);
1750 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
1751 * intel_pt_add_gp_regs().
1753 static const int pebs_gp_regs[] = {
1754 [PERF_REG_X86_FLAGS] = 1,
1755 [PERF_REG_X86_IP] = 2,
1756 [PERF_REG_X86_AX] = 3,
1757 [PERF_REG_X86_CX] = 4,
1758 [PERF_REG_X86_DX] = 5,
1759 [PERF_REG_X86_BX] = 6,
1760 [PERF_REG_X86_SP] = 7,
1761 [PERF_REG_X86_BP] = 8,
1762 [PERF_REG_X86_SI] = 9,
1763 [PERF_REG_X86_DI] = 10,
1764 [PERF_REG_X86_R8] = 11,
1765 [PERF_REG_X86_R9] = 12,
1766 [PERF_REG_X86_R10] = 13,
1767 [PERF_REG_X86_R11] = 14,
1768 [PERF_REG_X86_R12] = 15,
1769 [PERF_REG_X86_R13] = 16,
1770 [PERF_REG_X86_R14] = 17,
1771 [PERF_REG_X86_R15] = 18,
1774 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1775 const struct intel_pt_blk_items *items,
1778 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1779 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1783 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1784 /* Get the PEBS gp_regs array index */
1785 int n = pebs_gp_regs[i] - 1;
1790 * Add only registers that were requested (i.e. 'regs_mask') and
1791 * that were provided (i.e. 'mask'), and update the resulting
1792 * mask (i.e. 'intr_regs->mask') accordingly.
1794 if (mask & 1 << n && regs_mask & bit) {
1795 intr_regs->mask |= bit;
1796 *pos++ = gp_regs[n];
1803 #ifndef PERF_REG_X86_XMM0
1804 #define PERF_REG_X86_XMM0 32
1807 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1808 const struct intel_pt_blk_items *items,
1811 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1812 const u64 *xmm = items->xmm;
1815 * If there are any XMM registers, then there should be all of them.
1816 * Nevertheless, follow the logic to add only registers that were
1817 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1818 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1820 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1822 for (; mask; mask >>= 1, xmm++) {
1828 #define LBR_INFO_MISPRED (1ULL << 63)
1829 #define LBR_INFO_IN_TX (1ULL << 62)
1830 #define LBR_INFO_ABORT (1ULL << 61)
1831 #define LBR_INFO_CYCLES 0xffff
1833 /* Refer kernel's intel_pmu_store_pebs_lbrs() */
1834 static u64 intel_pt_lbr_flags(u64 info)
1837 struct branch_flags flags;
1842 u.flags.mispred = !!(info & LBR_INFO_MISPRED);
1843 u.flags.predicted = !(info & LBR_INFO_MISPRED);
1844 u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
1845 u.flags.abort = !!(info & LBR_INFO_ABORT);
1846 u.flags.cycles = info & LBR_INFO_CYCLES;
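/*
 * Each LBR block item packs entries as FROM/TO/INFO triples, so walk the item
 * mask three bits at a time and convert only complete triples
 * ((mask & 7) == 7) into branch_stack entries, decoding INFO into flags via
 * intel_pt_lbr_flags().
 */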
1851 static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1852 const struct intel_pt_blk_items *items)
1859 to = &br_stack->entries[0].from;
1861 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1862 u32 mask = items->mask[i];
1863 const u64 *from = items->val[i];
1865 for (; mask; mask >>= 3, from += 3) {
1866 if ((mask & 7) == 7) {
1869 *to++ = intel_pt_lbr_flags(from[2]);
1876 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
1878 const struct intel_pt_blk_items *items = &ptq->state->items;
1879 struct perf_sample sample = { .ip = 0, };
1880 union perf_event *event = ptq->event_buf;
1881 struct intel_pt *pt = ptq->pt;
1882 struct evsel *evsel = pt->pebs_evsel;
1883 u64 sample_type = evsel->core.attr.sample_type;
1884 u64 id = evsel->core.id[0];
1886 u64 regs[8 * sizeof(sample.intr_regs.mask)];
1888 if (intel_pt_skip_event(pt))
1891 intel_pt_prep_a_sample(ptq, event, &sample);
1894 sample.stream_id = id;
1896 if (!evsel->core.attr.freq)
1897 sample.period = evsel->core.attr.sample_period;
1899 /* No support for non-zero CS base */
1901 sample.ip = items->ip;
1902 else if (items->has_rip)
1903 sample.ip = items->rip;
1905 sample.ip = ptq->state->from_ip;
1907 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
1909 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
1911 sample.cpumode = cpumode;
1913 if (sample_type & PERF_SAMPLE_TIME) {
1916 if (items->has_timestamp)
1917 timestamp = items->timestamp;
1918 else if (!pt->timeless_decoding)
1919 timestamp = ptq->timestamp;
1921 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
1924 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
1925 pt->synth_opts.callchain) {
1926 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1927 pt->synth_opts.callchain_sz, sample.ip,
1929 sample.callchain = ptq->chain;
1932 if (sample_type & PERF_SAMPLE_REGS_INTR &&
1933 (items->mask[INTEL_PT_GP_REGS_POS] ||
1934 items->mask[INTEL_PT_XMM_POS])) {
1935 u64 regs_mask = evsel->core.attr.sample_regs_intr;
1938 sample.intr_regs.abi = items->is_32_bit ?
1939 PERF_SAMPLE_REGS_ABI_32 :
1940 PERF_SAMPLE_REGS_ABI_64;
1941 sample.intr_regs.regs = regs;
1943 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
1945 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
1948 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1949 if (items->mask[INTEL_PT_LBR_0_POS] ||
1950 items->mask[INTEL_PT_LBR_1_POS] ||
1951 items->mask[INTEL_PT_LBR_2_POS]) {
1952 intel_pt_add_lbrs(ptq->last_branch, items);
1953 } else if (pt->synth_opts.last_branch) {
1954 thread_stack__br_sample(ptq->thread, ptq->cpu,
1958 ptq->last_branch->nr = 0;
1960 sample.branch_stack = ptq->last_branch;
1963 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
1964 sample.addr = items->mem_access_address;
1966 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1968 * Refer kernel's setup_pebs_adaptive_sample_data() and
1969 * intel_hsw_weight().
1971 if (items->has_mem_access_latency) {
1972 u64 weight = items->mem_access_latency >> 32;
1975 * Starts from SPR, the mem access latency field
1976 * contains both cache latency [47:32] and instruction
1977 * latency [15:0]. The cache latency is the same as the
1978 * mem access latency on previous platforms.
1980 * In practice, no memory access could last more than 4G
1981 * cycles. Use latency >> 32 to distinguish the
1982 * different format of the mem access latency field.
1985 sample.weight = weight & 0xffff;
1986 sample.ins_lat = items->mem_access_latency & 0xffff;
1988 sample.weight = items->mem_access_latency;
1990 if (!sample.weight && items->has_tsx_aux_info) {
1991 /* Cycles last block */
1992 sample.weight = (u32)items->tsx_aux_info;
1996 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
1997 u64 ax = items->has_rax ? items->rax : 0;
1998 /* Refer kernel's intel_hsw_transaction() */
1999 u64 txn = (u8)(items->tsx_aux_info >> 32);
2001 /* For RTM XABORTs also log the abort code from AX */
2002 if (txn & PERF_TXN_TRANSACTION && ax & 1)
2003 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2004 sample.transaction = txn;
2007 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2010 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2011 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
2013 union perf_event event;
2014 char msg[MAX_AUXTRACE_ERROR_MSG];
2017 if (pt->synth_opts.error_minus_flags) {
2018 if (code == INTEL_PT_ERR_OVR &&
2019 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2021 if (code == INTEL_PT_ERR_LOST &&
2022 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2026 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2028 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2029 code, cpu, pid, tid, ip, msg, timestamp);
2031 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2033 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2039 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2040 const struct intel_pt_state *state)
2042 struct intel_pt *pt = ptq->pt;
2043 u64 tm = ptq->timestamp;
2045 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2047 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
2048 ptq->tid, state->from_ip, tm);
2051 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2053 struct auxtrace_queue *queue;
2054 pid_t tid = ptq->next_tid;
2060 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2062 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2064 queue = &pt->queues.queue_array[ptq->queue_nr];
2065 intel_pt_set_pid_tid_cpu(pt, queue);
2072 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2074 struct intel_pt *pt = ptq->pt;
2076 return ip == pt->switch_ip &&
2077 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2078 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2079 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2082 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2083 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2085 static int intel_pt_sample(struct intel_pt_queue *ptq)
2087 const struct intel_pt_state *state = ptq->state;
2088 struct intel_pt *pt = ptq->pt;
2091 if (!ptq->have_sample)
2094 ptq->have_sample = false;
2096 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2097 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2100 * Do PEBS first to allow for the possibility that the PEBS timestamp
2101 * precedes the current timestamp.
2103 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2104 err = intel_pt_synth_pebs_sample(ptq);
2109 if (pt->sample_pwr_events) {
2110 if (state->type & INTEL_PT_PSB_EVT) {
2111 err = intel_pt_synth_psb_sample(ptq);
2115 if (ptq->state->cbr != ptq->cbr_seen) {
2116 err = intel_pt_synth_cbr_sample(ptq);
2120 if (state->type & INTEL_PT_PWR_EVT) {
2121 if (state->type & INTEL_PT_MWAIT_OP) {
2122 err = intel_pt_synth_mwait_sample(ptq);
2126 if (state->type & INTEL_PT_PWR_ENTRY) {
2127 err = intel_pt_synth_pwre_sample(ptq);
2131 if (state->type & INTEL_PT_EX_STOP) {
2132 err = intel_pt_synth_exstop_sample(ptq);
2136 if (state->type & INTEL_PT_PWR_EXIT) {
2137 err = intel_pt_synth_pwrx_sample(ptq);
2144 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
2145 err = intel_pt_synth_instruction_sample(ptq);
2150 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2151 err = intel_pt_synth_transaction_sample(ptq);
2156 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2157 err = intel_pt_synth_ptwrite_sample(ptq);
2162 if (!(state->type & INTEL_PT_BRANCH))
2165 if (pt->use_thread_stack) {
2166 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2167 state->from_ip, state->to_ip, ptq->insn_len,
2168 state->trace_nr, pt->callstack,
2169 pt->br_stack_sz_plus,
2172 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2175 if (pt->sample_branches) {
2176 if (state->from_nr != state->to_nr &&
2177 state->from_ip && state->to_ip) {
2178 struct intel_pt_state *st = (struct intel_pt_state *)state;
2179 u64 to_ip = st->to_ip;
2180 u64 from_ip = st->from_ip;
2183 * perf cannot handle having different machines for ip
2184 * and addr, so create 2 branches.
2187 err = intel_pt_synth_branch_sample(ptq);
2192 err = intel_pt_synth_branch_sample(ptq);
2193 st->from_ip = from_ip;
2195 err = intel_pt_synth_branch_sample(ptq);
2201 if (!ptq->sync_switch)
2204 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2205 switch (ptq->switch_state) {
2206 case INTEL_PT_SS_NOT_TRACING:
2207 case INTEL_PT_SS_UNKNOWN:
2208 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2209 err = intel_pt_next_tid(pt, ptq);
2212 ptq->switch_state = INTEL_PT_SS_TRACING;
2215 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2218 } else if (!state->to_ip) {
2219 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2220 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2221 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2222 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2223 state->to_ip == pt->ptss_ip &&
2224 (ptq->flags & PERF_IP_FLAG_CALL)) {
2225 ptq->switch_state = INTEL_PT_SS_TRACING;
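/*
 * Find the kernel addresses used to recognize context switches in the trace:
 * the global __switch_to symbol gives the switch IP and, depending on how
 * sched_switch is being recorded, either perf_trace_sched_switch or
 * __perf_event_task_sched_out gives the "ptss" IP.
 */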
2231 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2233 struct machine *machine = pt->machine;
2235 struct symbol *sym, *start;
2236 u64 ip, switch_ip = 0;
2242 map = machine__kernel_map(machine);
2249 start = dso__first_symbol(map->dso);
2251 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2252 if (sym->binding == STB_GLOBAL &&
2253 !strcmp(sym->name, "__switch_to")) {
2254 ip = map->unmap_ip(map, sym->start);
2255 if (ip >= map->start && ip < map->end) {
2262 if (!switch_ip || !ptss_ip)
2265 if (pt->have_sched_switch == 1)
2266 ptss = "perf_trace_sched_switch";
2268 ptss = "__perf_event_task_sched_out";
2270 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2271 if (!strcmp(sym->name, ptss)) {
2272 ip = map->unmap_ip(map, sym->start);
2273 if (ip >= map->start && ip < map->end) {
2283 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2287 pt->sync_switch = true;
2289 for (i = 0; i < pt->queues.nr_queues; i++) {
2290 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2291 struct intel_pt_queue *ptq = queue->priv;
2294 ptq->sync_switch = true;
2299 * To filter against time ranges, it is only necessary to look at the next start
2300 * or end time.
2302 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2304 struct intel_pt *pt = ptq->pt;
2306 if (ptq->sel_start) {
2307 /* Next time is an end time */
2308 ptq->sel_start = false;
2309 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2311 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2312 /* Next time is a start time */
2313 ptq->sel_start = true;
2315 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2323 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2328 if (ptq->sel_start) {
2329 if (ptq->timestamp >= ptq->sel_timestamp) {
2330 /* After start time, so consider next time */
2331 intel_pt_next_time(ptq);
2332 if (!ptq->sel_timestamp) {
2336 /* Check against end time */
2339 /* Before start time, so fast forward */
2340 ptq->have_sample = false;
2341 if (ptq->sel_timestamp > *ff_timestamp) {
2342 if (ptq->sync_switch) {
2343 intel_pt_next_tid(ptq->pt, ptq);
2344 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2346 *ff_timestamp = ptq->sel_timestamp;
2347 err = intel_pt_fast_forward(ptq->decoder,
2348 ptq->sel_timestamp);
2353 } else if (ptq->timestamp > ptq->sel_timestamp) {
2354 /* After end time, so consider next time */
2355 if (!intel_pt_next_time(ptq)) {
2356 /* No next time range, so stop decoding */
2357 ptq->have_sample = false;
2358 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2361 /* Check against next start time */
2364 /* Before end time */
2370 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2372 const struct intel_pt_state *state = ptq->state;
2373 struct intel_pt *pt = ptq->pt;
2374 u64 ff_timestamp = 0;
2377 if (!pt->kernel_start) {
2378 pt->kernel_start = machine__kernel_start(pt->machine);
2379 if (pt->per_cpu_mmaps &&
2380 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2381 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2382 !pt->sampling_mode) {
2383 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2384 if (pt->switch_ip) {
2385 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2386 pt->switch_ip, pt->ptss_ip);
2387 intel_pt_enable_sync_switch(pt);
2392 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2393 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2395 err = intel_pt_sample(ptq);
2399 state = intel_pt_decode(ptq->decoder);
2401 if (state->err == INTEL_PT_ERR_NODATA)
2403 if (ptq->sync_switch &&
2404 state->from_ip >= pt->kernel_start) {
2405 ptq->sync_switch = false;
2406 intel_pt_next_tid(pt, ptq);
2408 if (pt->synth_opts.errors) {
2409 err = intel_ptq_synth_error(ptq, state);
2417 ptq->have_sample = true;
2418 intel_pt_sample_flags(ptq);
2420 /* Use estimated TSC upon return to user space */
2422 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2423 state->to_ip && state->to_ip < pt->kernel_start) {
2424 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2425 state->timestamp, state->est_timestamp);
2426 ptq->timestamp = state->est_timestamp;
2427 /* Use estimated TSC in unknown switch state */
2428 } else if (ptq->sync_switch &&
2429 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2430 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2431 ptq->next_tid == -1) {
2432 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2433 state->timestamp, state->est_timestamp);
2434 ptq->timestamp = state->est_timestamp;
2435 } else if (state->timestamp > ptq->timestamp) {
2436 ptq->timestamp = state->timestamp;
2439 if (ptq->sel_timestamp) {
2440 err = intel_pt_time_filter(ptq, &ff_timestamp);
2445 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2446 *timestamp = ptq->timestamp;
2453 static inline int intel_pt_update_queues(struct intel_pt *pt)
2455 if (pt->queues.new_data) {
2456 pt->queues.new_data = false;
2457 return intel_pt_setup_queues(pt);
2462 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2464 unsigned int queue_nr;
2469 struct auxtrace_queue *queue;
2470 struct intel_pt_queue *ptq;
2472 if (!pt->heap.heap_cnt)
2475 if (pt->heap.heap_array[0].ordinal >= timestamp)
2478 queue_nr = pt->heap.heap_array[0].queue_nr;
2479 queue = &pt->queues.queue_array[queue_nr];
2482 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2483 queue_nr, pt->heap.heap_array[0].ordinal,
2486 auxtrace_heap__pop(&pt->heap);
2488 if (pt->heap.heap_cnt) {
2489 ts = pt->heap.heap_array[0].ordinal + 1;
2496 intel_pt_set_pid_tid_cpu(pt, queue);
2498 ret = intel_pt_run_decoder(ptq, &ts);
2501 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2506 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2510 ptq->on_heap = false;
2517 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2520 struct auxtrace_queues *queues = &pt->queues;
2524 for (i = 0; i < queues->nr_queues; i++) {
2525 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2526 struct intel_pt_queue *ptq = queue->priv;
2528 if (ptq && (tid == -1 || ptq->tid == tid)) {
2530 intel_pt_set_pid_tid_cpu(pt, queue);
2531 intel_pt_run_decoder(ptq, &ts);
2537 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2538 struct auxtrace_queue *queue,
2539 struct perf_sample *sample)
2541 struct machine *m = ptq->pt->machine;
2543 ptq->pid = sample->pid;
2544 ptq->tid = sample->tid;
2545 ptq->cpu = queue->cpu;
2547 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2548 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2550 thread__zput(ptq->thread);
2555 if (ptq->pid == -1) {
2556 ptq->thread = machine__find_thread(m, -1, ptq->tid);
2558 ptq->pid = ptq->thread->pid_;
2562 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
2565 static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2566 struct perf_sample *sample)
2568 struct auxtrace_queue *queue;
2569 struct intel_pt_queue *ptq;
2572 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2581 ptq->time = sample->time;
2582 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2583 intel_pt_run_decoder(ptq, &ts);
2587 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2589 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2590 sample->pid, sample->tid, 0, sample->time);
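/*
 * Map a CPU number to its queue. Per-cpu queues are normally at the index
 * matching the CPU, so try that first, then search backwards and finally
 * forwards through the queue array.
 */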
2593 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2597 if (cpu < 0 || !pt->queues.nr_queues)
2600 if ((unsigned)cpu >= pt->queues.nr_queues)
2601 i = pt->queues.nr_queues - 1;
2605 if (pt->queues.queue_array[i].cpu == cpu)
2606 return pt->queues.queue_array[i].priv;
2608 for (j = 0; i > 0; j++) {
2609 if (pt->queues.queue_array[--i].cpu == cpu)
2610 return pt->queues.queue_array[i].priv;
2613 for (; j < pt->queues.nr_queues; j++) {
2614 if (pt->queues.queue_array[j].cpu == cpu)
2615 return pt->queues.queue_array[j].priv;
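/*
 * Handle a context switch while "sync switch" is in use: normally record the
 * next tid and wait for the switch IP to appear in the trace, but if a switch
 * event was already expected, resume tracing and, if needed, put the queue
 * back on the timestamp-ordered heap.
 */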
2621 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2624 struct intel_pt_queue *ptq;
2627 if (!pt->sync_switch)
2630 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2631 if (!ptq || !ptq->sync_switch)
2634 switch (ptq->switch_state) {
2635 case INTEL_PT_SS_NOT_TRACING:
2637 case INTEL_PT_SS_UNKNOWN:
2638 case INTEL_PT_SS_TRACING:
2639 ptq->next_tid = tid;
2640 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2642 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2643 if (!ptq->on_heap) {
2644 ptq->timestamp = perf_time_to_tsc(timestamp,
2646 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2650 ptq->on_heap = true;
2652 ptq->switch_state = INTEL_PT_SS_TRACING;
2654 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2655 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2666 static int intel_pt_process_switch(struct intel_pt *pt,
2667 struct perf_sample *sample)
2671 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
2673 if (evsel != pt->switch_evsel)
2676 tid = evsel__intval(evsel, sample, "next_pid");
2679 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2680 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2683 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2687 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2690 static int intel_pt_context_switch_in(struct intel_pt *pt,
2691 struct perf_sample *sample)
2693 pid_t pid = sample->pid;
2694 pid_t tid = sample->tid;
2695 int cpu = sample->cpu;
2697 if (pt->sync_switch) {
2698 struct intel_pt_queue *ptq;
2700 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2701 if (ptq && ptq->sync_switch) {
2703 switch (ptq->switch_state) {
2704 case INTEL_PT_SS_NOT_TRACING:
2705 case INTEL_PT_SS_UNKNOWN:
2706 case INTEL_PT_SS_TRACING:
2708 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2709 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2710 ptq->switch_state = INTEL_PT_SS_TRACING;
2719 * If the current tid has not been updated yet, ensure it is updated
2720 * now that a "switch in" event has occurred.
2722 if (machine__get_current_tid(pt->machine, cpu) == tid)
2725 return machine__set_current_tid(pt->machine, cpu, pid, tid);
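/*
 * Handle PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE.  When
 * have_sched_switch == 3, a "switch in" is handled by
 * intel_pt_context_switch_in() and a "switch out" must be a CPU-wide
 * event, which supplies the incoming task in next_prev_pid/tid.
 */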
2728 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2729 struct perf_sample *sample)
2731 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2737 if (pt->have_sched_switch == 3) {
2739 return intel_pt_context_switch_in(pt, sample);
2740 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2741 pr_err("Expecting CPU-wide context switch event\n");
2744 pid = event->context_switch.next_prev_pid;
2745 tid = event->context_switch.next_prev_tid;
2754 intel_pt_log("context_switch event has no tid\n");
2756 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2760 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2763 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2764 union perf_event *event,
2765 struct perf_sample *sample)
2767 if (!pt->per_cpu_mmaps)
2770 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2771 sample->cpu, event->itrace_start.pid,
2772 event->itrace_start.tid, sample->time,
2773 perf_time_to_tsc(sample->time, &pt->tc));
2775 return machine__set_current_tid(pt->machine, sample->cpu,
2776 event->itrace_start.pid,
2777 event->itrace_start.tid);
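/* Look up the map for addr, reusing the previous result while addr stays within it */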
2780 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
2781 struct addr_location *al)
2783 if (!al->map || addr < al->map->start || addr >= al->map->end) {
2784 if (!thread__find_map(thread, cpumode, addr, al))
2791 /* Invalidate all instruction cache entries that overlap the text poke */
2792 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
2794 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2795 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
2796 /* Assume the text poke begins in a basic block no more than 4096 bytes in size */
2797 int cnt = 4096 + event->text_poke.new_len;
2798 struct thread *thread = pt->unknown_thread;
2799 struct addr_location al = { .map = NULL };
2800 struct machine *machine = pt->machine;
2801 struct intel_pt_cache_entry *e;
2804 if (!event->text_poke.new_len)
2807 for (; cnt; cnt--, addr--) {
2808 if (intel_pt_find_map(thread, cpumode, addr, &al)) {
2809 if (addr < event->text_poke.addr)
2814 if (!al.map->dso || !al.map->dso->auxtrace_cache)
2817 offset = al.map->map_ip(al.map, addr);
2819 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
2823 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
2825 * No overlap. Working backwards, there cannot be another
2826 * basic block that overlaps the text poke if there is a
2827 * branch instruction before the text poke address.
2829 if (e->branch != INTEL_PT_BR_NO_BRANCH)
2832 intel_pt_cache_invalidate(al.map->dso, machine, offset);
2833 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
2834 al.map->dso->long_name, addr);
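/*
 * Main per-event entry point: convert the sample time to TSC, run the
 * decoders up to that point, augment regular samples with synthesized
 * callchains or branch stacks when requested, then dispatch lost-data
 * errors, sched_switch samples, context switch, itrace_start and text
 * poke events.
 */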
2841 static int intel_pt_process_event(struct perf_session *session,
2842 union perf_event *event,
2843 struct perf_sample *sample,
2844 struct perf_tool *tool)
2846 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2854 if (!tool->ordered_events) {
2855 pr_err("Intel Processor Trace requires ordered events\n");
2859 if (sample->time && sample->time != (u64)-1)
2860 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2864 if (timestamp || pt->timeless_decoding) {
2865 err = intel_pt_update_queues(pt);
2870 if (pt->timeless_decoding) {
2871 if (pt->sampling_mode) {
2872 if (sample->aux_sample.size)
2873 err = intel_pt_process_timeless_sample(pt,
2875 } else if (event->header.type == PERF_RECORD_EXIT) {
2876 err = intel_pt_process_timeless_queues(pt,
2880 } else if (timestamp) {
2881 err = intel_pt_process_queues(pt, timestamp);
2886 if (event->header.type == PERF_RECORD_SAMPLE) {
2887 if (pt->synth_opts.add_callchain && !sample->callchain)
2888 intel_pt_add_callchain(pt, sample);
2889 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
2890 intel_pt_add_br_stack(pt, sample);
2893 if (event->header.type == PERF_RECORD_AUX &&
2894 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
2895 pt->synth_opts.errors) {
2896 err = intel_pt_lost(pt, sample);
2901 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
2902 err = intel_pt_process_switch(pt, sample);
2903 else if (event->header.type == PERF_RECORD_ITRACE_START)
2904 err = intel_pt_process_itrace_start(pt, event, sample);
2905 else if (event->header.type == PERF_RECORD_SWITCH ||
2906 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2907 err = intel_pt_context_switch(pt, event, sample);
2909 if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
2910 err = intel_pt_text_poke(pt, event);
2912 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
2913 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2914 event->header.type, sample->cpu, sample->time, timestamp);
2915 intel_pt_log_event(event);
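/*
 * Flush at the end of the session: decode whatever remains, either all
 * timeless queues or everything up to MAX_TIMESTAMP.
 */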
2921 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
2923 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2930 if (!tool->ordered_events)
2933 ret = intel_pt_update_queues(pt);
2937 if (pt->timeless_decoding)
2938 return intel_pt_process_timeless_queues(pt, -1,
2941 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
2944 static void intel_pt_free_events(struct perf_session *session)
2946 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2948 struct auxtrace_queues *queues = &pt->queues;
2951 for (i = 0; i < queues->nr_queues; i++) {
2952 intel_pt_free_queue(queues->queue_array[i].priv);
2953 queues->queue_array[i].priv = NULL;
2955 intel_pt_log_disable();
2956 auxtrace_queues__free(queues);
2959 static void intel_pt_free(struct perf_session *session)
2961 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2964 auxtrace_heap__free(&pt->heap);
2965 intel_pt_free_events(session);
2966 session->auxtrace = NULL;
2967 thread__put(pt->unknown_thread);
2968 addr_filters__exit(&pt->filts);
2971 zfree(&pt->time_ranges);
2975 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
2976 struct evsel *evsel)
2978 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2981 return evsel->core.attr.type == pt->pmu_type;
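/*
 * If the AUX area data was not already queued from the file's auxtrace
 * index (e.g. piped input), queue each PERF_RECORD_AUXTRACE event as it
 * is seen, and dump its payload when trace dumping is enabled.
 */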
2984 static int intel_pt_process_auxtrace_event(struct perf_session *session,
2985 union perf_event *event,
2986 struct perf_tool *tool __maybe_unused)
2988 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2991 if (!pt->data_queued) {
2992 struct auxtrace_buffer *buffer;
2994 int fd = perf_data__fd(session->data);
2997 if (perf_data__is_pipe(session->data)) {
3000 data_offset = lseek(fd, 0, SEEK_CUR);
3001 if (data_offset == -1)
3005 err = auxtrace_queues__add_event(&pt->queues, session, event,
3006 data_offset, &buffer);
3010 /* Dump here, now that we have copied a piped trace out of the pipe */
3012 if (auxtrace_buffer__get_data(buffer, fd)) {
3013 intel_pt_dump_event(pt, buffer->data,
3015 auxtrace_buffer__put_data(buffer);
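/*
 * Queue AUX area data for later decoding: whole PERF_RECORD_AUXTRACE
 * events are added by file offset, while aux_sample data is added per
 * sample with its timestamp converted to TSC.
 */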
3023 static int intel_pt_queue_data(struct perf_session *session,
3024 struct perf_sample *sample,
3025 union perf_event *event, u64 data_offset)
3027 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3032 return auxtrace_queues__add_event(&pt->queues, session, event,
3036 if (sample->time && sample->time != (u64)-1)
3037 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3041 return auxtrace_queues__add_sample(&pt->queues, session, sample,
3042 data_offset, timestamp);
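/*
 * Helper tool used by intel_pt_synth_event() to deliver the synthesized
 * attribute event into the session, so that the session knows the
 * attribute and id of each event type to be synthesized.
 */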
3045 struct intel_pt_synth {
3046 struct perf_tool dummy_tool;
3047 struct perf_session *session;
3050 static int intel_pt_event_synth(struct perf_tool *tool,
3051 union perf_event *event,
3052 struct perf_sample *sample __maybe_unused,
3053 struct machine *machine __maybe_unused)
3055 struct intel_pt_synth *intel_pt_synth =
3056 container_of(tool, struct intel_pt_synth, dummy_tool);
3058 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3062 static int intel_pt_synth_event(struct perf_session *session, const char *name,
3063 struct perf_event_attr *attr, u64 id)
3065 struct intel_pt_synth intel_pt_synth;
3068 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3069 name, id, (u64)attr->sample_type);
3071 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3072 intel_pt_synth.session = session;
3074 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3075 &id, intel_pt_event_synth);
3077 pr_err("%s: failed to synthesize '%s' event type\n",
3083 static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3086 struct evsel *evsel;
3088 evlist__for_each_entry(evlist, evsel) {
3089 if (evsel->core.id && evsel->core.id[0] == id) {
3091 zfree(&evsel->name);
3092 evsel->name = strdup(name);
3098 static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3099 struct evlist *evlist)
3101 struct evsel *evsel;
3103 evlist__for_each_entry(evlist, evsel) {
3104 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
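/*
 * Create the perf_event_attrs, ids and names for the event types to be
 * synthesized (branches, instructions, transactions, ptwrite and power
 * events), based on the attributes of the Intel PT event itself.
 */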
3111 static int intel_pt_synth_events(struct intel_pt *pt,
3112 struct perf_session *session)
3114 struct evlist *evlist = session->evlist;
3115 struct evsel *evsel = intel_pt_evsel(pt, evlist);
3116 struct perf_event_attr attr;
3121 pr_debug("There are no selected events with Intel Processor Trace data\n");
3125 memset(&attr, 0, sizeof(struct perf_event_attr));
3126 attr.size = sizeof(struct perf_event_attr);
3127 attr.type = PERF_TYPE_HARDWARE;
3128 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3129 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3131 if (pt->timeless_decoding)
3132 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3134 attr.sample_type |= PERF_SAMPLE_TIME;
3135 if (!pt->per_cpu_mmaps)
3136 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3137 attr.exclude_user = evsel->core.attr.exclude_user;
3138 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3139 attr.exclude_hv = evsel->core.attr.exclude_hv;
3140 attr.exclude_host = evsel->core.attr.exclude_host;
3141 attr.exclude_guest = evsel->core.attr.exclude_guest;
3142 attr.sample_id_all = evsel->core.attr.sample_id_all;
3143 attr.read_format = evsel->core.attr.read_format;
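/*
 * Derive ids for synthesized events from the Intel PT evsel's first id,
 * offset by a large constant, presumably so they do not clash with ids
 * already in use.
 */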
3145 id = evsel->core.id[0] + 1000000000;
3149 if (pt->synth_opts.branches) {
3150 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3151 attr.sample_period = 1;
3152 attr.sample_type |= PERF_SAMPLE_ADDR;
3153 err = intel_pt_synth_event(session, "branches", &attr, id);
3156 pt->sample_branches = true;
3157 pt->branches_sample_type = attr.sample_type;
3158 pt->branches_id = id;
3160 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3163 if (pt->synth_opts.callchain)
3164 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3165 if (pt->synth_opts.last_branch) {
3166 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3168 * We don't use the hardware index, but the sample generation
3169 * code uses the new format branch_stack with this field,
3170 * so the event attributes must indicate that it's present.
3172 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3175 if (pt->synth_opts.instructions) {
3176 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3177 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3178 attr.sample_period =
3179 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3181 attr.sample_period = pt->synth_opts.period;
3182 err = intel_pt_synth_event(session, "instructions", &attr, id);
3185 pt->sample_instructions = true;
3186 pt->instructions_sample_type = attr.sample_type;
3187 pt->instructions_id = id;
3191 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3192 attr.sample_period = 1;
3194 if (pt->synth_opts.transactions) {
3195 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3196 err = intel_pt_synth_event(session, "transactions", &attr, id);
3199 pt->sample_transactions = true;
3200 pt->transactions_sample_type = attr.sample_type;
3201 pt->transactions_id = id;
3202 intel_pt_set_event_name(evlist, id, "transactions");
3206 attr.type = PERF_TYPE_SYNTH;
3207 attr.sample_type |= PERF_SAMPLE_RAW;
3209 if (pt->synth_opts.ptwrites) {
3210 attr.config = PERF_SYNTH_INTEL_PTWRITE;
3211 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3214 pt->sample_ptwrites = true;
3215 pt->ptwrites_sample_type = attr.sample_type;
3216 pt->ptwrites_id = id;
3217 intel_pt_set_event_name(evlist, id, "ptwrite");
3221 if (pt->synth_opts.pwr_events) {
3222 pt->sample_pwr_events = true;
3223 pt->pwr_events_sample_type = attr.sample_type;
3225 attr.config = PERF_SYNTH_INTEL_CBR;
3226 err = intel_pt_synth_event(session, "cbr", &attr, id);
3230 intel_pt_set_event_name(evlist, id, "cbr");
3233 attr.config = PERF_SYNTH_INTEL_PSB;
3234 err = intel_pt_synth_event(session, "psb", &attr, id);
3238 intel_pt_set_event_name(evlist, id, "psb");
3242 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
3243 attr.config = PERF_SYNTH_INTEL_MWAIT;
3244 err = intel_pt_synth_event(session, "mwait", &attr, id);
3248 intel_pt_set_event_name(evlist, id, "mwait");
3251 attr.config = PERF_SYNTH_INTEL_PWRE;
3252 err = intel_pt_synth_event(session, "pwre", &attr, id);
3256 intel_pt_set_event_name(evlist, id, "pwre");
3259 attr.config = PERF_SYNTH_INTEL_EXSTOP;
3260 err = intel_pt_synth_event(session, "exstop", &attr, id);
3264 intel_pt_set_event_name(evlist, id, "exstop");
3267 attr.config = PERF_SYNTH_INTEL_PWRX;
3268 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3272 intel_pt_set_event_name(evlist, id, "pwrx");
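/*
 * Remember which evsels requested aux_output so that PEBS-via-PT records
 * found in the trace can be synthesized as samples for those events.
 */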
3279 static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3281 struct evsel *evsel;
3283 if (!pt->synth_opts.other_events)
3286 evlist__for_each_entry(pt->session->evlist, evsel) {
3287 if (evsel->core.attr.aux_output && evsel->core.id) {
3288 pt->sample_pebs = true;
3289 pt->pebs_evsel = evsel;
3295 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3297 struct evsel *evsel;
3299 evlist__for_each_entry_reverse(evlist, evsel) {
3300 const char *name = evsel__name(evsel);
3302 if (!strcmp(name, "sched:sched_switch"))
3309 static bool intel_pt_find_switch(struct evlist *evlist)
3311 struct evsel *evsel;
3313 evlist__for_each_entry(evlist, evsel) {
3314 if (evsel->core.attr.context_switch)
3321 static int intel_pt_perf_config(const char *var, const char *value, void *data)
3323 struct intel_pt *pt = data;
3325 if (!strcmp(var, "intel-pt.mispred-all"))
3326 pt->mispred_all = perf_config_bool(var, value);
3331 /* Find the least TSC value that converts to a time of ns or later */
3332 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3336 tsc = perf_time_to_tsc(ns, &pt->tc);
3339 tm = tsc_to_perf_time(tsc, &pt->tc);
3346 tm = tsc_to_perf_time(++tsc, &pt->tc);
3351 /* Find the greatest TSC value that converts to a time of ns or earlier */
3352 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3356 tsc = perf_time_to_tsc(ns, &pt->tc);
3359 tm = tsc_to_perf_time(tsc, &pt->tc);
3366 tm = tsc_to_perf_time(--tsc, &pt->tc);
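/*
 * Convert the requested perf-time ranges to TSC, rounding each start up
 * and each end down so that converting back never falls outside the
 * requested range.
 */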
3371 static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3372 struct itrace_synth_opts *opts)
3374 struct perf_time_interval *p = opts->ptime_range;
3375 int n = opts->range_num;
3378 if (!n || !p || pt->timeless_decoding)
3381 pt->time_ranges = calloc(n, sizeof(struct range));
3382 if (!pt->time_ranges)
3387 intel_pt_log("%s: %u range(s)\n", __func__, n);
3389 for (i = 0; i < n; i++) {
3390 struct range *r = &pt->time_ranges[i];
3391 u64 ts = p[i].start;
3395 * Take care to ensure the TSC range matches the perf-time range
3396 * when converted back to perf-time.
3398 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3399 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3401 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3403 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3404 i, r->start, r->end);
3410 static const char * const intel_pt_info_fmts[] = {
3411 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3412 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3413 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3414 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3415 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3416 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3417 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3418 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3419 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3420 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3421 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3422 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3423 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3424 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3425 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3426 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3429 static void intel_pt_print_info(__u64 *arr, int start, int finish)
3436 for (i = start; i <= finish; i++)
3437 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3440 static void intel_pt_print_info_str(const char *name, const char *str)
3445 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
3448 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3450 return auxtrace_info->header.size >=
3451 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
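/*
 * Set up Intel PT processing for the session: parse the priv[] array
 * written by 'perf record' (PMU type, TSC conversion parameters, optional
 * MTC/CYC parameters and filter string), create the queues, register the
 * auxtrace callbacks and synthesize the requested event types.
 */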
3454 int intel_pt_process_auxtrace_info(union perf_event *event,
3455 struct perf_session *session)
3457 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3458 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3459 struct intel_pt *pt;
3464 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3468 pt = zalloc(sizeof(struct intel_pt));
3472 addr_filters__init(&pt->filts);
3474 err = perf_config(intel_pt_perf_config, pt);
3478 err = auxtrace_queues__init(&pt->queues);
3482 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3484 pt->session = session;
3485 pt->machine = &session->machines.host; /* No kvm support */
3486 pt->auxtrace_type = auxtrace_info->type;
3487 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3488 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3489 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3490 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3491 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3492 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3493 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3494 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3495 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3496 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3497 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3498 INTEL_PT_PER_CPU_MMAPS);
3500 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3501 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3502 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3503 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3504 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3505 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3506 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3510 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3511 pt->max_non_turbo_ratio =
3512 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3513 intel_pt_print_info(&auxtrace_info->priv[0],
3514 INTEL_PT_MAX_NONTURBO_RATIO,
3515 INTEL_PT_MAX_NONTURBO_RATIO);
3518 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3519 info_end = (void *)info + auxtrace_info->header.size;
3521 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3524 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3525 intel_pt_print_info(&auxtrace_info->priv[0],
3526 INTEL_PT_FILTER_STR_LEN,
3527 INTEL_PT_FILTER_STR_LEN);
3529 const char *filter = (const char *)info;
3531 len = roundup(len + 1, 8);
3533 if ((void *)info > info_end) {
3534 pr_err("%s: bad filter string length\n", __func__);
3536 goto err_free_queues;
3538 pt->filter = memdup(filter, len);
3541 goto err_free_queues;
3543 if (session->header.needs_swap)
3544 mem_bswap_64(pt->filter, len);
3545 if (pt->filter[len - 1]) {
3546 pr_err("%s: filter string not null terminated\n", __func__);
3548 goto err_free_queues;
3550 err = addr_filters__parse_bare_filter(&pt->filts,
3553 goto err_free_queues;
3555 intel_pt_print_info_str("Filter string", pt->filter);
3558 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3559 if (pt->timeless_decoding && !pt->tc.time_mult)
3560 pt->tc.time_mult = 1;
3561 pt->have_tsc = intel_pt_have_tsc(pt);
3562 pt->sampling_mode = intel_pt_sampling_mode(pt);
3563 pt->est_tsc = !pt->timeless_decoding;
3565 pt->unknown_thread = thread__new(999999999, 999999999);
3566 if (!pt->unknown_thread) {
3568 goto err_free_queues;
3572 * Since this thread will not be kept in any rbtree nor in a
3573 * list, initialize its list node so that at thread__put() the
3574 * current thread lifetime assumption is kept and we don't segfault
3575 * at list_del_init().
3577 INIT_LIST_HEAD(&pt->unknown_thread->node);
3579 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3581 goto err_delete_thread;
3582 if (thread__init_maps(pt->unknown_thread, pt->machine)) {
3584 goto err_delete_thread;
3587 pt->auxtrace.process_event = intel_pt_process_event;
3588 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3589 pt->auxtrace.queue_data = intel_pt_queue_data;
3590 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
3591 pt->auxtrace.flush_events = intel_pt_flush;
3592 pt->auxtrace.free_events = intel_pt_free_events;
3593 pt->auxtrace.free = intel_pt_free;
3594 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
3595 session->auxtrace = &pt->auxtrace;
3600 if (pt->have_sched_switch == 1) {
3601 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3602 if (!pt->switch_evsel) {
3603 pr_err("%s: missing sched_switch event\n", __func__);
3605 goto err_delete_thread;
3607 } else if (pt->have_sched_switch == 2 &&
3608 !intel_pt_find_switch(session->evlist)) {
3609 pr_err("%s: missing context_switch attribute flag\n", __func__);
3611 goto err_delete_thread;
3614 if (session->itrace_synth_opts->set) {
3615 pt->synth_opts = *session->itrace_synth_opts;
3617 itrace_synth_opts__set_default(&pt->synth_opts,
3618 session->itrace_synth_opts->default_no_sample);
3619 if (!session->itrace_synth_opts->default_no_sample &&
3620 !session->itrace_synth_opts->inject) {
3621 pt->synth_opts.branches = false;
3622 pt->synth_opts.callchain = true;
3623 pt->synth_opts.add_callchain = true;
3625 pt->synth_opts.thread_stack =
3626 session->itrace_synth_opts->thread_stack;
3629 if (pt->synth_opts.log)
3630 intel_pt_log_enable();
3632 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
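/* e.g. a 2.4 GHz TSC gives (2400000000 + 50000000) / 100000000 = 24, rounded to the nearest 100 MHz */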
3633 if (pt->tc.time_mult) {
3634 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3636 if (!pt->max_non_turbo_ratio)
3637 pt->max_non_turbo_ratio =
3638 (tsc_freq + 50000000) / 100000000;
3639 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3640 intel_pt_log("Maximum non-turbo ratio %u\n",
3641 pt->max_non_turbo_ratio);
3642 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
3645 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3647 goto err_delete_thread;
3649 if (pt->synth_opts.calls)
3650 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3651 PERF_IP_FLAG_TRACE_END;
3652 if (pt->synth_opts.returns)
3653 pt->branches_filter |= PERF_IP_FLAG_RETURN |
3654 PERF_IP_FLAG_TRACE_BEGIN;
3656 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
3657 !symbol_conf.use_callchain) {
3658 symbol_conf.use_callchain = true;
3659 if (callchain_register_param(&callchain_param) < 0) {
3660 symbol_conf.use_callchain = false;
3661 pt->synth_opts.callchain = false;
3662 pt->synth_opts.add_callchain = false;
3666 if (pt->synth_opts.add_callchain) {
3667 err = intel_pt_callchain_init(pt);
3669 goto err_delete_thread;
3672 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
3673 pt->br_stack_sz = pt->synth_opts.last_branch_sz;
3674 pt->br_stack_sz_plus = pt->br_stack_sz;
3677 if (pt->synth_opts.add_last_branch) {
3678 err = intel_pt_br_stack_init(pt);
3680 goto err_delete_thread;
3682 * Additional branch stack size to cater for tracing from the
3683 * actual sample ip to where the sample time is recorded.
3684 * Measured at about 200 branches, but generously set to 1024.
3685 * If kernel space is not being traced, then add just 1 for the
3686 * branch to kernel space.
3688 if (intel_pt_tracing_kernel(pt))
3689 pt->br_stack_sz_plus += 1024;
3691 pt->br_stack_sz_plus += 1;
3694 pt->use_thread_stack = pt->synth_opts.callchain ||
3695 pt->synth_opts.add_callchain ||
3696 pt->synth_opts.thread_stack ||
3697 pt->synth_opts.last_branch ||
3698 pt->synth_opts.add_last_branch;
3700 pt->callstack = pt->synth_opts.callchain ||
3701 pt->synth_opts.add_callchain ||
3702 pt->synth_opts.thread_stack;
3704 err = intel_pt_synth_events(pt, session);
3706 goto err_delete_thread;
3708 intel_pt_setup_pebs_events(pt);
3710 if (pt->sampling_mode || list_empty(&session->auxtrace_index))
3711 err = auxtrace_queue_data(session, true, true);
3713 err = auxtrace_queues__process_index(&pt->queues, session);
3715 goto err_delete_thread;
3717 if (pt->queues.populated)
3718 pt->data_queued = true;
3720 if (pt->timeless_decoding)
3721 pr_debug2("Intel PT decoding without timestamps\n");
3727 thread__zput(pt->unknown_thread);
3729 intel_pt_log_disable();
3730 auxtrace_queues__free(&pt->queues);
3731 session->auxtrace = NULL;
3733 addr_filters__exit(&pt->filts);
3735 zfree(&pt->time_ranges);