// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "thread-stack.h"
#include "callchain.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool use_thread_stack;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 branches_sample_type;
	bool sample_transactions;
	u64 transactions_sample_type;
	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	struct evsel *pebs_evsel;
	unsigned max_non_turbo_ratio;
	unsigned long num_events;
	struct addr_filters filts;
	struct range *time_ranges;
	unsigned int range_cnt;
	struct ip_callchain *chain;
	struct branch_stack *br_stack;
	struct rb_root vmcs_info;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	unsigned int sel_idx;
	u64 last_in_insn_cnt;
	u64 last_br_insn_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		pkt_len = ret > 0 ? ret : 1;
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		color_fprintf(stdout, color, " ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
		if (v->vmcs == vmcs)
			return v;
		p = vmcs < v->vmcs ? &(*p)->rb_left : &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;

		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}

	return v;
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
	struct intel_pt_queue *ptq = data;
	struct intel_pt *pt = ptq->pt;

	if (!vmcs && !pt->dflt_tsc_offset)
		return NULL;

	return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}

static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
	struct intel_pt_vmcs_info *v;
	struct rb_node *n;

	n = rb_first(&pt->vmcs_info);
	while (n) {
		v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
		n = rb_next(n);
		rb_erase(&v->rb_node, &pt->vmcs_info);
		free(v);
	}
}
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive,
				      pt->synth_opts.vm_time_correlation);
	if (!start)
		return -EINVAL;
	/*
	 * In the case of vm_time_correlation, the overlap might contain TSC
	 * packets that will not be fixed, and that will then no longer work for
	 * overlap detection. Avoid that by zeroing out the overlap.
	 */
	if (pt->synth_opts.vm_time_correlation)
		memset(b->data, 0, start - b->data);
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}
/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}
/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}
	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
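
/*
 * Worked example (illustrative values, assuming the fallback divisor of
 * 64): a 16 MiB DSO gives size = 16 MiB / 64 = 0x40000, and
 * 32 - __builtin_clz(0x40000) = 19, i.e. a 19-bit cache. The checks
 * above clamp the result to the 10..21 bit range regardless of DSO
 * size.
 */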
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}
static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
	/* Assumes 64-bit kernel */
	return ip & (1ULL << 63);
}
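
/*
 * For example, on x86-64 the kernel lives in the upper half of the
 * canonical address space: 0xffffffff81000000 has bit 63 set and is
 * classified as guest kernel, while a user address such as
 * 0x00007f0000001000 has bit 63 clear.
 */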
static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
	if (nr) {
		return intel_pt_guest_kernel_ip(ip) ?
		       PERF_RECORD_MISC_GUEST_KERNEL :
		       PERF_RECORD_MISC_GUEST_USER;
	}

	return ip >= ptq->pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
	/* No support for non-zero CS base */
	if (from_ip)
		return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
	return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}
static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;

	if (ptq->guest_machine && pid == ptq->guest_machine_pid)
		return 0;

	ptq->guest_machine = NULL;
	thread__zput(ptq->unknown_guest_thread);

	machine = machines__find_guest(machines, pid);
	if (!machine)
		return -1;

	ptq->unknown_guest_thread = machine__idle_thread(machine);
	if (!ptq->unknown_guest_thread)
		return -1;

	ptq->guest_machine = machine;
	ptq->guest_machine_pid = pid;

	return 0;
}
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;
	bool nr;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	nr = ptq->state->to_nr;
	cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);

	if (nr) {
		if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
		    intel_pt_get_guest(ptq))
			return -EINVAL;
		machine = ptq->guest_machine;
		thread = ptq->unknown_guest_thread;
	} else {
		thread = ptq->thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_KERNEL)
				return -EINVAL;
			thread = ptq->pt->unknown_thread;
		}
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * cache entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}
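
/*
 * A true return means the TIP.PGD destination is treated as not traced:
 * either a trace-stop region was hit, or address filters exist and the
 * offset fell outside all of them. For example (illustrative values),
 * with a single "filter" entry covering offsets 0x1000-0x2000, an
 * offset of 0x3000 gives have_filter && !hit_filter, i.e. true.
 */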
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ptq->state->to_nr) {
		if (intel_pt_guest_kernel_ip(ip))
			return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
		/* No support for decoding guest user space */
		return -EINVAL;
	} else if (ip >= ptq->pt->kernel_start) {
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
	}

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return false;
	}
	return true;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}
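
/*
 * Example (illustrative values): if mtc_freq_bits is 0x3c000, i.e. a
 * 4-bit field at config bits 17:14, the loop above yields shift = 14,
 * so config = 0x2c000 gives an MTC period of
 * (0x2c000 & 0x3c000) >> 14 = 0xb.
 */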
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_have_mtc(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->mtc_bit))
			return true;
	}
	return false;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ctl(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return config;
	}
	return 0;
}
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}
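
/*
 * This inverts perf's TSC-to-time conversion,
 * time = (tsc * time_mult) >> time_shift, splitting ns into quotient
 * and remainder to avoid 64-bit overflow. Example (illustrative
 * values): time_mult = 1000, time_shift = 10, ns = 2500 gives quot = 2,
 * rem = 500, so the result is (2 << 10) + (500 << 10) / 1000 =
 * 2048 + 512 = 2560 ticks.
 */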
static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
	size_t sz = sizeof(struct ip_callchain);

	/* Add 1 to callchain_sz for callchain context */
	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
	return zalloc(sz);
}
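
/*
 * The extra entry is for the PERF_CONTEXT_* marker (e.g.
 * PERF_CONTEXT_KERNEL or PERF_CONTEXT_USER) that the thread-stack code
 * places in the callchain, so callchain_sz = 16 allocates room for 17
 * u64 entries.
 */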
static int intel_pt_callchain_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
	}

	pt->chain = intel_pt_alloc_chain(pt);
	if (!pt->chain)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_callchain(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__sample_late(thread, sample->cpu, pt->chain,
				  pt->synth_opts.callchain_sz + 1, sample->ip,
				  pt->kernel_start);

	sample->callchain = pt->chain;
}
static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
	size_t sz = sizeof(struct branch_stack);

	sz += entry_cnt * sizeof(struct branch_entry);
	return zalloc(sz);
}

static int intel_pt_br_stack_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
	}

	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
	if (!pt->br_stack)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_br_stack(struct intel_pt *pt,
				  struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
				     pt->br_stack_sz, sample->ip,
				     pt->kernel_start);

	sample->branch_stack = pt->br_stack;
}
/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
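
/*
 * Each of the three LBR block items (LBR_0, LBR_1, LBR_2) can carry up
 * to INTEL_PT_BLK_ITEM_ID_CNT values, hence the 3U multiplier. LBRS_MAX
 * is used below as a lower bound when sizing ptq->last_branch so that
 * LBR data arriving via PEBS always fits.
 */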
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		ptq->chain = intel_pt_alloc_chain(pt);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);

		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
		if (!ptq->last_branch)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.lookahead = intel_pt_lookahead;
	params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.ctl = intel_pt_ctl(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
	params.quick = pt->synth_opts.quick;
	params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
	params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
	params.first_timestamp = pt->first_timestamp;
	params.max_loops = pt->max_loops;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	thread__zput(ptq->unknown_guest_thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
}
static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
	unsigned int i;

	pt->first_timestamp = timestamp;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && ptq->decoder)
			intel_pt_set_first_timestamp(ptq->decoder, timestamp);
	}
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		if (ptq->tid == -1)
			ptq->pid = -1;
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	ptq->insn_len = 0;
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (!ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		else if (ptq->state->from_nr && !ptq->state->to_nr)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_VMEXIT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;
}
static void intel_pt_setup_time_range(struct intel_pt *pt,
				      struct intel_pt_queue *ptq)
{
	if (!pt->range_cnt)
		return;

	ptq->sel_timestamp = pt->time_ranges[0].start;
	ptq->sel_idx = 0;

	if (ptq->sel_timestamp) {
		ptq->sel_start = true;
	} else {
		ptq->sel_timestamp = pt->time_ranges[0].end;
		ptq->sel_start = false;
	}
}
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		ptq->cbr_seen = UINT_MAX;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;

		intel_pt_setup_time_range(pt, ptq);
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);

		if (ptq->sel_start && ptq->sel_timestamp) {
			ret = intel_pt_fast_forward(ptq->decoder,
						    ptq->sel_timestamp);
			if (ret)
				return ret;
		}

		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		if (ptq->sel_start && ptq->sel_timestamp &&
		    ptq->timestamp < ptq->sel_timestamp)
			ptq->have_sample = false;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}
/*
 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
 * from this decoder state.
 */
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events + 4 < pt->synth_opts.initial_skip;
}
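
/*
 * Example (illustrative values): with initial_skip = 10,
 * intel_pt_skip_event() keeps skipping while fewer than 10 events have
 * been counted, but intel_pt_skip_cbr_event() stops skipping once
 * num_events reaches 6 (6 + 4 is not < 10), letting a CBR sample be
 * synthesized just ahead of the first non-skipped samples.
 */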
static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->cpu = ptq->cpu;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}

static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_a_sample(ptq, event, sample);

	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->ip = ptq->state->from_ip;
	sample->addr = ptq->state->to_ip;
	sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
	sample->period = 1;
	sample->flags = ptq->flags;

	event->sample.header.misc = sample->cpumode;
}
static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_event(struct intel_pt *pt,
					union perf_event *event,
					struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		u64			hw_idx;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
	if (sample.cyc_cnt) {
		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
		ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
	}

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->branches_sample_type);
}
static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz + 1,
				     sample->ip, pt->kernel_start);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
					pt->br_stack_sz);
		sample->branch_stack = ptq->last_branch;
	}
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	if (pt->synth_opts.quick)
		sample.period = 1;
	else
		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
	if (sample.cyc_cnt) {
		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
		ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
	}

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->instructions_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	sample->flags = 0;
}
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_cbr_event(pt))
		return 0;

	ptq->cbr_seen = ptq->state->cbr;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_psb raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->psb_id;
	sample.stream_id = ptq->pt->psb_id;
	sample.flags = 0;

	raw.reserved = 0;
	raw.offset = ptq->state->psb_offset;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}
/*
 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
 * intel_pt_add_gp_regs().
 */
static const int pebs_gp_regs[] = {
	[PERF_REG_X86_FLAGS]	= 1,
	[PERF_REG_X86_IP]	= 2,
	[PERF_REG_X86_AX]	= 3,
	[PERF_REG_X86_CX]	= 4,
	[PERF_REG_X86_DX]	= 5,
	[PERF_REG_X86_BX]	= 6,
	[PERF_REG_X86_SP]	= 7,
	[PERF_REG_X86_BP]	= 8,
	[PERF_REG_X86_SI]	= 9,
	[PERF_REG_X86_DI]	= 10,
	[PERF_REG_X86_R8]	= 11,
	[PERF_REG_X86_R9]	= 12,
	[PERF_REG_X86_R10]	= 13,
	[PERF_REG_X86_R11]	= 14,
	[PERF_REG_X86_R12]	= 15,
	[PERF_REG_X86_R13]	= 16,
	[PERF_REG_X86_R14]	= 17,
	[PERF_REG_X86_R15]	= 18,
};
static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
				 const struct intel_pt_blk_items *items,
				 u64 regs_mask)
{
	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
	u32 bit;
	int i;

	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
		/* Get the PEBS gp_regs array index */
		int n = pebs_gp_regs[i] - 1;

		if (n < 0)
			continue;
		/*
		 * Add only registers that were requested (i.e. 'regs_mask') and
		 * that were provided (i.e. 'mask'), and update the resulting
		 * mask (i.e. 'intr_regs->mask') accordingly.
		 */
		if (mask & 1 << n && regs_mask & bit) {
			intr_regs->mask |= bit;
			*pos++ = gp_regs[n];
		}
	}

	return pos;
}
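
/*
 * Example: PERF_REG_X86_IP has pebs_gp_regs[] value 2, i.e. PEBS array
 * index n = 1, so when the IP bit is set in both 'mask' and
 * 'regs_mask', gp_regs[1] is appended at *pos and the IP bit is set in
 * intr_regs->mask.
 */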
#ifndef PERF_REG_X86_XMM0
#define PERF_REG_X86_XMM0 32
#endif

static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
			     const struct intel_pt_blk_items *items,
			     u64 regs_mask)
{
	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
	const u64 *xmm = items->xmm;

	/*
	 * If there are any XMM registers, then there should be all of them.
	 * Nevertheless, follow the logic to add only registers that were
	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
	 */
	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;

	for (; mask; mask >>= 1, xmm++) {
		if (mask & 1)
			*pos++ = *xmm;
	}
}
#define LBR_INFO_MISPRED	(1ULL << 63)
#define LBR_INFO_IN_TX		(1ULL << 62)
#define LBR_INFO_ABORT		(1ULL << 61)
#define LBR_INFO_CYCLES		0xffff

/* Refer kernel's intel_pmu_store_pebs_lbrs() */
static u64 intel_pt_lbr_flags(u64 info)
{
	union {
		struct branch_flags flags;
		u64 result;
	} u;

	u.result = 0;
	u.flags.mispred = !!(info & LBR_INFO_MISPRED);
	u.flags.predicted = !(info & LBR_INFO_MISPRED);
	u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
	u.flags.abort = !!(info & LBR_INFO_ABORT);
	u.flags.cycles = info & LBR_INFO_CYCLES;

	return u.result;
}
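
/*
 * The union allows the packed branch_flags bit-fields to be returned as
 * a plain u64. For example, an LBR info word with bit 63 set and a
 * cycle count of 100 produces flags with mispred = 1, predicted = 0 and
 * cycles = 100.
 */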
static void intel_pt_add_lbrs(struct branch_stack *br_stack,
			      const struct intel_pt_blk_items *items)
{
	u64 *to;
	int i;

	br_stack->nr = 0;

	to = &br_stack->entries[0].from;

	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
		u32 mask = items->mask[i];
		const u64 *from = items->val[i];

		for (; mask; mask >>= 3, from += 3) {
			if ((mask & 7) == 7) {
				*to++ = from[0];
				*to++ = from[1];
				*to++ = intel_pt_lbr_flags(from[2]);
				br_stack->nr += 1;
			}
		}
	}
}
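
/*
 * Each LBR entry in a block item is three consecutive u64 values
 * (from, to, info) with three corresponding mask bits, which is why the
 * mask is consumed 3 bits at a time and only complete (mask & 7) == 7
 * triples are copied into the branch stack.
 */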
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_blk_items *items = &ptq->state->items;
	struct perf_sample sample = { .ip = 0, };
	union perf_event *event = ptq->event_buf;
	struct intel_pt *pt = ptq->pt;
	struct evsel *evsel = pt->pebs_evsel;
	u64 sample_type = evsel->core.attr.sample_type;
	u64 id = evsel->core.id[0];
	u8 cpumode;
	u64 regs[8 * sizeof(sample.intr_regs.mask)];

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_a_sample(ptq, event, &sample);

	sample.id = id;
	sample.stream_id = id;

	if (!evsel->core.attr.freq)
		sample.period = evsel->core.attr.sample_period;

	/* No support for non-zero CS base */
	if (items->has_ip)
		sample.ip = items->ip;
	else if (items->has_rip)
		sample.ip = items->rip;
	else
		sample.ip = ptq->state->from_ip;

	cpumode = intel_pt_cpumode(ptq, sample.ip, 0);

	event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;

	sample.cpumode = cpumode;

	if (sample_type & PERF_SAMPLE_TIME) {
		u64 timestamp = 0;

		if (items->has_timestamp)
			timestamp = items->timestamp;
		else if (!pt->timeless_decoding)
			timestamp = ptq->timestamp;
		if (timestamp)
			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
	    pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip,
				     pt->kernel_start);
		sample.callchain = ptq->chain;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR &&
	    (items->mask[INTEL_PT_GP_REGS_POS] ||
	     items->mask[INTEL_PT_XMM_POS])) {
		u64 regs_mask = evsel->core.attr.sample_regs_intr;
		u64 *pos;

		sample.intr_regs.abi = items->is_32_bit ?
				       PERF_SAMPLE_REGS_ABI_32 :
				       PERF_SAMPLE_REGS_ABI_64;
		sample.intr_regs.regs = regs;

		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);

		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (items->mask[INTEL_PT_LBR_0_POS] ||
		    items->mask[INTEL_PT_LBR_1_POS] ||
		    items->mask[INTEL_PT_LBR_2_POS]) {
			intel_pt_add_lbrs(ptq->last_branch, items);
		} else if (pt->synth_opts.last_branch) {
			thread_stack__br_sample(ptq->thread, ptq->cpu,
						ptq->last_branch,
						pt->br_stack_sz);
		} else {
			ptq->last_branch->nr = 0;
		}
		sample.branch_stack = ptq->last_branch;
	}

	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
		sample.addr = items->mem_access_address;

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		/*
		 * Refer kernel's setup_pebs_adaptive_sample_data() and
		 * intel_hsw_weight().
		 */
		if (items->has_mem_access_latency) {
			u64 weight = items->mem_access_latency >> 32;

			/*
			 * Starts from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles. Use latency >> 32 to distinguish the
			 * different format of the mem access latency field.
			 */
			if (weight > 0) {
				sample.weight = weight & 0xffff;
				sample.ins_lat = items->mem_access_latency & 0xffff;
			} else {
				sample.weight = items->mem_access_latency;
			}
		}
		if (!sample.weight && items->has_tsx_aux_info) {
			/* Cycles last block */
			sample.weight = (u32)items->tsx_aux_info;
		}
	}

	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
		u64 ax = items->has_rax ? items->rax : 0;
		/* Refer kernel's intel_hsw_transaction() */
		u64 txn = (u8)(items->tsx_aux_info >> 32);

		/* For RTM XABORTs also log the abort code from AX */
		if (txn & PERF_TXN_TRANSACTION && ax & 1)
			txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
		sample.transaction = txn;
	}

	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	if (pt->synth_opts.error_minus_flags) {
		if (code == INTEL_PT_ERR_OVR &&
		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
			return 0;
		if (code == INTEL_PT_ERR_LOST &&
		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
			return 0;
	}

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg, timestamp);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
				 const struct intel_pt_state *state)
{
	struct intel_pt *pt = ptq->pt;
	u64 tm = ptq->timestamp;

	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);

	return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
				    ptq->tid, state->from_ip, tm);
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
	ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;

	/*
	 * Do PEBS first to allow for the possibility that the PEBS timestamp
	 * precedes the current timestamp.
	 */
	if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
		err = intel_pt_synth_pebs_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_pwr_events) {
		if (state->type & INTEL_PT_PSB_EVT) {
			err = intel_pt_synth_psb_sample(ptq);
			if (err)
				return err;
		}
		if (ptq->state->cbr != ptq->cbr_seen) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EVT) {
			if (state->type & INTEL_PT_MWAIT_OP) {
				err = intel_pt_synth_mwait_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_PWR_ENTRY) {
				err = intel_pt_synth_pwre_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_EX_STOP) {
				err = intel_pt_synth_exstop_sample(ptq);
				if (err)
					return err;
			}
			if (state->type & INTEL_PT_PWR_EXIT) {
				err = intel_pt_synth_pwrx_sample(ptq);
				if (err)
					return err;
			}
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->use_thread_stack) {
		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
				    state->from_ip, state->to_ip, ptq->insn_len,
				    state->trace_nr, pt->callstack,
				    pt->br_stack_sz_plus,
				    pt->mispred_all);
	} else {
		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
	}

	if (pt->sample_branches) {
		if (state->from_nr != state->to_nr &&
		    state->from_ip && state->to_ip) {
			struct intel_pt_state *st = (struct intel_pt_state *)state;
			u64 to_ip = st->to_ip;
			u64 from_ip = st->from_ip;

			/*
			 * perf cannot handle having different machines for ip
			 * and addr, so create 2 branches.
			 */
			st->to_ip = 0;
			err = intel_pt_synth_branch_sample(ptq);
			if (err)
				return err;
			st->from_ip = 0;
			st->to_ip = to_ip;
			err = intel_pt_synth_branch_sample(ptq);
			st->from_ip = from_ip;
		} else {
			err = intel_pt_synth_branch_sample(ptq);
		}
		if (err)
			return err;
	}

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map || map__load(map))
		return 0;

	start = dso__first_symbol(map->dso);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}
/*
 * To filter against time ranges, it is only necessary to look at the next start
 * or end time.
 */
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	if (ptq->sel_start) {
		/* Next time is an end time */
		ptq->sel_start = false;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
		return true;
	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
		/* Next time is a start time */
		ptq->sel_start = true;
		ptq->sel_idx += 1;
		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
		return true;
	}

	/* No next time */
	return false;
}
static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
{
	int err;

	while (1) {
		if (ptq->sel_start) {
			if (ptq->timestamp >= ptq->sel_timestamp) {
				/* After start time, so consider next time */
				intel_pt_next_time(ptq);
				if (!ptq->sel_timestamp) {
					/* No end time */
					return 0;
				}
				/* Check against end time */
				continue;
			}
			/* Before start time, so fast forward */
			ptq->have_sample = false;
			if (ptq->sel_timestamp > *ff_timestamp) {
				if (ptq->sync_switch) {
					intel_pt_next_tid(ptq->pt, ptq);
					ptq->switch_state = INTEL_PT_SS_UNKNOWN;
				}
				*ff_timestamp = ptq->sel_timestamp;
				err = intel_pt_fast_forward(ptq->decoder,
							    ptq->sel_timestamp);
				if (err)
					return err;
			}
			return 0;
		} else if (ptq->timestamp > ptq->sel_timestamp) {
			/* After end time, so consider next time */
			if (!intel_pt_next_time(ptq)) {
				/* No next time range, so stop decoding */
				ptq->have_sample = false;
				ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
				return 0;
			}
			/* Check against next start time */
			continue;
		} else {
			/* Before end time */
			return 0;
		}
	}
}
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	u64 ff_timestamp = 0;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_ptq_synth_error(ptq, state);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (ptq->sel_timestamp) {
			err = intel_pt_time_filter(ptq, &ff_timestamp);
			if (err)
				return err;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}

	return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}
static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
					    struct auxtrace_queue *queue,
					    struct perf_sample *sample)
{
	struct machine *m = ptq->pt->machine;

	ptq->pid = sample->pid;
	ptq->tid = sample->tid;
	ptq->cpu = queue->cpu;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid == -1)
		return;

	if (ptq->pid == -1) {
		ptq->thread = machine__find_thread(m, -1, ptq->tid);
		if (ptq->thread)
			ptq->pid = ptq->thread->pid_;
		return;
	}

	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
}
static int intel_pt_process_timeless_sample(struct intel_pt *pt,
					    struct perf_sample *sample)
{
	struct auxtrace_queue *queue;
	struct intel_pt_queue *ptq;
	u64 ts = 0;

	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
	if (!queue)
		return -EINVAL;

	ptq = queue->priv;
	if (!ptq)
		return 0;

	ptq->stop = false;
	ptq->time = sample->time;
	intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
	intel_pt_run_decoder(ptq, &ts);
	return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0, sample->time);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	ptq->next_tid = -1;

	return 1;
}
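/*
 * Summary sketch of the sync_switch state machine handled above (inferred
 * from the cases shown):
 *
 *	UNKNOWN/TRACING        --switch event--> EXPECTING_SWITCH_IP
 *	EXPECTING_SWITCH_IP    --switch ip hit-> TRACING (tid switched)
 *	EXPECTING_SWITCH_EVENT --switch event--> TRACING (queue re-heaped)
 *	NOT_TRACING            --switch event--> unchanged
 */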
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	pid_t tid;
	int cpu, ret;
	struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);

	if (evsel != pt->switch_evsel)
		return 0;

	tid = evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch_in(struct intel_pt *pt,
				      struct perf_sample *sample)
{
	pid_t pid = sample->pid;
	pid_t tid = sample->tid;
	int cpu = sample->cpu;

	if (pt->sync_switch) {
		struct intel_pt_queue *ptq;

		ptq = intel_pt_cpu_to_ptq(pt, cpu);
		if (ptq && ptq->sync_switch) {
			ptq->next_tid = -1;
			switch (ptq->switch_state) {
			case INTEL_PT_SS_NOT_TRACING:
			case INTEL_PT_SS_UNKNOWN:
			case INTEL_PT_SS_TRACING:
				break;
			case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
			case INTEL_PT_SS_EXPECTING_SWITCH_IP:
				ptq->switch_state = INTEL_PT_SS_TRACING;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * If the current tid has not been updated yet, ensure it is now that
	 * a "switch in" event has occurred.
	 */
	if (machine__get_current_tid(pt->machine, cpu) == tid)
		return 0;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return intel_pt_context_switch_in(pt, sample);
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1)
		intel_pt_log("context_switch event has no tid\n");

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
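/*
 * Illustrative note: with have_sched_switch == 3 only switch-out events
 * carry useful data, and PERF_RECORD_SWITCH_CPU_WIDE supplies the incoming
 * task in next_prev_pid/next_prev_tid. For example (made-up values), a
 * switch-out on cpu 1 to tid 200 yields:
 *
 *	event->header.misc & PERF_RECORD_MISC_SWITCH_OUT  -> set
 *	event->context_switch.next_prev_tid               -> 200
 *
 * so machine__set_current_tid() records tid 200 as current on cpu 1.
 */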
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	if (!al->map || addr < al->map->start || addr >= al->map->end) {
		if (!thread__find_map(thread, cpumode, addr, al))
			return -1;
	}

	return 0;
}
/* Invalidate all instruction cache entries that overlap the text poke */
static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
	/* Assume the text poke begins in a basic block of no more than 4096 bytes */
	int cnt = 4096 + event->text_poke.new_len;
	struct thread *thread = pt->unknown_thread;
	struct addr_location al = { .map = NULL };
	struct machine *machine = pt->machine;
	struct intel_pt_cache_entry *e;
	u64 offset;

	if (!event->text_poke.new_len)
		return 0;

	for (; cnt; cnt--, addr--) {
		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
			if (addr < event->text_poke.addr)
				return 0;
			continue;
		}

		if (!al.map->dso || !al.map->dso->auxtrace_cache)
			continue;

		offset = al.map->map_ip(al.map, addr);

		e = intel_pt_cache_lookup(al.map->dso, machine, offset);
		if (!e)
			continue;

		if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
			/*
			 * No overlap. Working backwards, there cannot be
			 * another basic block that overlaps the text poke if
			 * there is a branch instruction before the text poke
			 * address.
			 */
			if (e->branch != INTEL_PT_BR_NO_BRANCH)
				return 0;
		} else {
			intel_pt_cache_invalidate(al.map->dso, machine, offset);
			intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
				     al.map->dso->long_name, addr);
		}
	}

	return 0;
}
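/*
 * Worked example with hypothetical addresses: a text poke of new_len 4 at
 * 0xffffffff81000100 starts the walk at addr = 0xffffffff81000103 and steps
 * backwards for at most 4096 + 4 bytes. A cached basic block whose bytes
 * end at or before 0xffffffff81000100 cannot overlap the poke, and if it
 * ends in a branch no earlier block can overlap either, so the walk stops
 * there; otherwise the stale cache entry is invalidated.
 */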
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (pt->sampling_mode) {
			if (sample->aux_sample.size)
				err = intel_pt_process_timeless_sample(pt,
								       sample);
		} else if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		if (!pt->first_timestamp)
			intel_pt_first_timestamp(pt, timestamp);
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (pt->synth_opts.add_callchain && !sample->callchain)
			intel_pt_add_callchain(pt, sample);
		if (pt->synth_opts.add_last_branch && !sample->branch_stack)
			intel_pt_add_br_stack(pt, sample);
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
		err = intel_pt_text_poke(pt, event);

	if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
		intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
			     event->header.type, sample->cpu, sample->time, timestamp);
		intel_pt_log_event(event);
	}

	return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
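/*
 * Sketch: at session flush, MAX_TIMESTAMP (~0ULL) makes every heap ordinal
 * compare below the target, so all remaining queued data is decoded:
 *
 *	ret = intel_pt_process_queues(pt, MAX_TIMESTAMP);
 *
 * Timeless decoding instead replays all queues at time MAX_TIMESTAMP - 1.
 */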
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	intel_pt_free_vmcs_info(pt);
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->chain);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
}
static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
				       struct evsel *evsel)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	return evsel->core.attr.type == pt->pmu_type;
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now that we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
static int intel_pt_queue_data(struct perf_session *session,
			       struct perf_sample *sample,
			       union perf_event *event, u64 data_offset)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;

	if (event) {
		return auxtrace_queues__add_event(&pt->queues, session, event,
						  data_offset, NULL);
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	return auxtrace_queues__add_sample(&pt->queues, session, sample,
					   data_offset, timestamp);
}
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}
static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}
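/*
 * Illustrative usage (mirrors the calls made in intel_pt_synth_events()
 * below): inject a synthetic "branches" attribute so that samples later
 * synthesized with this id get parsed with the intended sample_type:
 *
 *	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
 *	err = intel_pt_synth_event(session, "branches", &attr, id);
 */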
static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
				    const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}
static struct evsel *intel_pt_evsel(struct intel_pt *pt,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
			return evsel;
	}

	return NULL;
}
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	id = evsel->core.id[0] + 1000000000;
	if (!id)
		id = 1;
	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}
	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;
	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;
	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}
	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PSB;
		err = intel_pt_synth_event(session, "psb", &attr, id);
		if (err)
			return err;
		pt->psb_id = id;
		intel_pt_set_event_name(evlist, id, "psb");
		id += 1;
	}
	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}
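/*
 * Note on the id allocation above (a summary, not new behaviour): ids start
 * at evsel->core.id[0] + 1000000000 to keep clear of real sample ids, and
 * each synthesized event type takes the next consecutive id, e.g. assuming
 * all options are enabled:
 *
 *	branches -> id, instructions -> id + 1, transactions -> id + 2, ...
 *
 * Disabled options are simply skipped, so the ids stay contiguous.
 */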
static void intel_pt_setup_pebs_events(struct intel_pt *pt)
{
	struct evsel *evsel;

	if (!pt->synth_opts.other_events)
		return;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (evsel->core.attr.aux_output && evsel->core.id) {
			pt->sample_pebs = true;
			pt->pebs_evsel = evsel;
			return;
		}
	}
}
static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}
static bool intel_pt_find_switch(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.context_switch)
			return true;
	}

	return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	if (!strcmp(var, "intel-pt.max-loops"))
		perf_config_int(&pt->max_loops, var, value);

	return 0;
}
/* Find least TSC which converts to ns or later */
static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
{
	u64 tsc, tm;

	tsc = perf_time_to_tsc(ns, &pt->tc);

	while (1) {
		tm = tsc_to_perf_time(tsc, &pt->tc);
		if (tm < ns)
			break;
		tsc -= 1;
	}

	while (tm < ns)
		tm = tsc_to_perf_time(++tsc, &pt->tc);

	return tsc;
}
/* Find greatest TSC which converts to ns or earlier */
static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
{
	u64 tsc, tm;

	tsc = perf_time_to_tsc(ns, &pt->tc);

	while (1) {
		tm = tsc_to_perf_time(tsc, &pt->tc);
		if (tm > ns)
			break;
		tsc += 1;
	}

	while (tm > ns)
		tm = tsc_to_perf_time(--tsc, &pt->tc);

	return tsc;
}
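/*
 * Worked example with made-up numbers: if several TSC values convert to the
 * same nanosecond, say tsc 4000..4002 all map to perf time 1000, then
 * intel_pt_tsc_start(1000, pt) returns 4000 (the least TSC converting to
 * 1000 or later) and intel_pt_tsc_end(1000, pt) returns 4002 (the greatest
 * TSC converting to 1000 or earlier), so a TSC range round-trips to the
 * same perf-time interval.
 */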
static int intel_pt_setup_time_ranges(struct intel_pt *pt,
				      struct itrace_synth_opts *opts)
{
	struct perf_time_interval *p = opts->ptime_range;
	int n = opts->range_num;
	int i;

	if (!n || !p || pt->timeless_decoding)
		return 0;

	pt->time_ranges = calloc(n, sizeof(struct range));
	if (!pt->time_ranges)
		return -ENOMEM;

	pt->range_cnt = n;

	intel_pt_log("%s: %u range(s)\n", __func__, n);

	for (i = 0; i < n; i++) {
		struct range *r = &pt->time_ranges[i];
		u64 ts = p[i].start;
		u64 te = p[i].end;

		/*
		 * Take care to ensure the TSC range matches the perf-time range
		 * when converted back to perf-time.
		 */
		r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
		r->end = te ? intel_pt_tsc_end(te, pt) : 0;

		intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
			     i, ts, te);
		intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
			     i, r->start, r->end);
	}

	return 0;
}
static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
{
	struct intel_pt_vmcs_info *vmcs_info;
	u64 tsc_offset, vmcs;
	char *p = *args;

	errno = 0;

	p = skip_spaces(p);
	if (!*p)
		return 1;

	tsc_offset = strtoull(p, &p, 0);
	if (errno)
		return -errno;
	p = skip_spaces(p);
	if (*p != ':') {
		pt->dflt_tsc_offset = tsc_offset;
		*args = p;
		return 0;
	}
	p += 1;
	while (1) {
		vmcs = strtoull(p, &p, 0);
		if (errno)
			return -errno;
		vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
		if (!vmcs_info)
			return -ENOMEM;
		p = skip_spaces(p);
		if (*p != ',')
			break;
		p += 1;
	}
	*args = p;
	return 0;
}
static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
{
	char *args = pt->synth_opts.vm_tm_corr_args;
	int ret;

	if (!args)
		return 0;

	do {
		ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
	} while (!ret);

	if (ret < 0) {
		pr_err("Failed to parse VM Time Correlation options\n");
		return ret;
	}

	return 0;
}
static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};
static void intel_pt_print_info(__u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}
static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}
static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
}
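/*
 * Example: for pos == INTEL_PT_CYC_BIT, intel_pt_has() requires the
 * auxtrace_info payload to hold at least INTEL_PT_CYC_BIT + 1 priv u64s
 * beyond the fixed header:
 *
 *	header.size >= sizeof(struct perf_record_auxtrace_info) +
 *		       sizeof(u64) * (INTEL_PT_CYC_BIT + 1)
 *
 * which lets fields added by newer perf versions be probed safely when
 * reading older perf.data files.
 */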
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	__u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	pt->vmcs_info = RB_ROOT;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);
	if (session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		struct itrace_synth_opts *opts = session->itrace_synth_opts;

		itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
		if (!opts->default_no_sample && !opts->inject) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
			pt->synth_opts.add_callchain = true;
		}
		pt->synth_opts.thread_stack = opts->thread_stack;
	}
	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);
	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}
	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

	if (pt->synth_opts.vm_time_correlation) {
		if (pt->timeless_decoding) {
			pr_err("Intel PT has no time information for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		if (session->itrace_synth_opts->ptime_range) {
			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		/* Currently TSC Offset is calculated using MTC packets */
		if (!intel_pt_have_mtc(pt)) {
			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		err = intel_pt_parse_vm_tm_corr_args(pt);
		if (err)
			goto err_free_queues;
	}
	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a list,
	 * initialize its list node so that at thread__put() the current
	 * thread lifetime assumption is kept and we don't segfault at
	 * list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}
	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.queue_data = intel_pt_queue_data;
	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
	session->auxtrace = &pt->auxtrace;
	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}
	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}
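	/*
	 * Worked example (assumed 2.4 GHz TSC): tsc_freq = 2400000000, so
	 * max_non_turbo_ratio = (2400000000 + 50000000) / 100000000 = 24,
	 * i.e. 24 x 100 MHz, and cbr2khz = 2400000000 / 24 / 1000 = 100000,
	 * meaning one core-to-bus ratio step corresponds to 100 MHz.
	 */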
	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
	if (err)
		goto err_delete_thread;

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
	    !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
			pt->synth_opts.add_callchain = false;
		}
	}
	if (pt->synth_opts.add_callchain) {
		err = intel_pt_callchain_init(pt);
		if (err)
			goto err_delete_thread;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
		pt->br_stack_sz_plus = pt->br_stack_sz;
	}

	if (pt->synth_opts.add_last_branch) {
		err = intel_pt_br_stack_init(pt);
		if (err)
			goto err_delete_thread;
		/*
		 * Additional branch stack size to cater for tracing from the
		 * actual sample ip to where the sample time is recorded.
		 * Measured at about 200 branches, but generously set to 1024.
		 * If kernel space is not being traced, then add just 1 for the
		 * branch to kernel space.
		 */
		if (intel_pt_tracing_kernel(pt))
			pt->br_stack_sz_plus += 1024;
		else
			pt->br_stack_sz_plus += 1;
	}
	pt->use_thread_stack = pt->synth_opts.callchain ||
			       pt->synth_opts.add_callchain ||
			       pt->synth_opts.thread_stack ||
			       pt->synth_opts.last_branch ||
			       pt->synth_opts.add_last_branch;

	pt->callstack = pt->synth_opts.callchain ||
			pt->synth_opts.add_callchain ||
			pt->synth_opts.thread_stack;
	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	intel_pt_setup_pebs_events(pt);

	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
		err = auxtrace_queue_data(session, true, true);
	else
		err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;
err_delete_thread:
	zfree(&pt->chain);
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
	return err;
}