1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_pt.c: Intel Processor Trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/zalloc.h>
27 #include "thread-stack.h"
29 #include "callchain.h"
36 #include "time-utils.h"
38 #include "../arch/x86/include/uapi/asm/perf_regs.h"
40 #include "intel-pt-decoder/intel-pt-log.h"
41 #include "intel-pt-decoder/intel-pt-decoder.h"
42 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
43 #include "intel-pt-decoder/intel-pt-pkt-decoder.h"
45 #define MAX_TIMESTAMP (~0ULL)
53 struct auxtrace auxtrace;
54 struct auxtrace_queues queues;
55 struct auxtrace_heap heap;
57 struct perf_session *session;
58 struct machine *machine;
59 struct perf_evsel *switch_evsel;
60 struct thread *unknown_thread;
61 bool timeless_decoding;
70 int have_sched_switch;
76 struct perf_tsc_conversion tc;
77 bool cap_user_time_zero;
79 struct itrace_synth_opts synth_opts;
81 bool sample_instructions;
82 u64 instructions_sample_type;
87 u64 branches_sample_type;
90 bool sample_transactions;
91 u64 transactions_sample_type;
95 u64 ptwrites_sample_type;
98 bool sample_pwr_events;
99 u64 pwr_events_sample_type;
107 struct perf_evsel *pebs_evsel;
116 unsigned max_non_turbo_ratio;
119 unsigned long num_events;
122 struct addr_filters filts;
124 struct range *time_ranges;
125 unsigned int range_cnt;
129 INTEL_PT_SS_NOT_TRACING,
132 INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
133 INTEL_PT_SS_EXPECTING_SWITCH_IP,
136 struct intel_pt_queue {
138 unsigned int queue_nr;
139 struct auxtrace_buffer *buffer;
140 struct auxtrace_buffer *old_buffer;
142 const struct intel_pt_state *state;
143 struct ip_callchain *chain;
144 struct branch_stack *last_branch;
145 struct branch_stack *last_branch_rb;
146 size_t last_branch_pos;
147 union perf_event *event_buf;
150 bool step_through_buffers;
151 bool use_buffer_pid_tid;
157 struct thread *thread;
164 unsigned int sel_idx;
170 u64 last_in_insn_cnt;
172 u64 last_br_insn_cnt;
174 unsigned int cbr_seen;
175 char insn[INTEL_PT_INSN_BUF_SZ];
178 static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
179 unsigned char *buf, size_t len)
181 struct intel_pt_pkt packet;
184 char desc[INTEL_PT_PKT_DESC_MAX];
185 const char *color = PERF_COLOR_BLUE;
186 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
188 color_fprintf(stdout, color,
189 ". ... Intel Processor Trace data: size %zu bytes\n",
193 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
199 color_fprintf(stdout, color, " %08x: ", pos);
200 for (i = 0; i < pkt_len; i++)
201 color_fprintf(stdout, color, " %02x", buf[i]);
203 color_fprintf(stdout, color, " ");
205 ret = intel_pt_pkt_desc(&packet, desc,
206 INTEL_PT_PKT_DESC_MAX);
208 color_fprintf(stdout, color, " %s\n", desc);
210 color_fprintf(stdout, color, " Bad packet!\n");
218 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
222 intel_pt_dump(pt, buf, len);
225 static void intel_pt_log_event(union perf_event *event)
227 FILE *f = intel_pt_log_fp();
229 if (!intel_pt_enable_logging || !f)
232 perf_event__fprintf(event, f);
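/*
 * In snapshot or sampling mode, consecutive AUX buffers can overlap. Trim
 * buffer 'b' so that decoding does not process the same trace data twice.
 */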
235 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
236 struct auxtrace_buffer *b)
238 bool consecutive = false;
241 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
242 pt->have_tsc, &consecutive);
245 b->use_size = b->data + b->size - start;
247 if (b->use_size && consecutive)
248 b->consecutive = true;
252 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
253 struct auxtrace_buffer *buffer,
254 struct auxtrace_buffer *old_buffer,
255 struct intel_pt_buffer *b)
260 int fd = perf_data__fd(ptq->pt->session->data);
262 buffer->data = auxtrace_buffer__get_data(buffer, fd);
267 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
268 if (might_overlap && !buffer->consecutive && old_buffer &&
269 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
272 if (buffer->use_data) {
273 b->len = buffer->use_size;
274 b->buf = buffer->use_data;
276 b->len = buffer->size;
277 b->buf = buffer->data;
279 b->ref_timestamp = buffer->reference;
281 if (!old_buffer || (might_overlap && !buffer->consecutive)) {
282 b->consecutive = false;
283 b->trace_nr = buffer->buffer_nr + 1;
285 b->consecutive = true;
291 /* Do not drop buffers with references - refer intel_pt_get_trace() */
292 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
293 struct auxtrace_buffer *buffer)
295 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
298 auxtrace_buffer__drop_data(buffer);
301 /* Must be serialized with respect to intel_pt_get_trace() */
302 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
305 struct intel_pt_queue *ptq = data;
306 struct auxtrace_buffer *buffer = ptq->buffer;
307 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
308 struct auxtrace_queue *queue;
311 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
314 struct intel_pt_buffer b = { .len = 0 };
316 buffer = auxtrace_buffer__next(queue, buffer);
320 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
325 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
328 intel_pt_lookahead_drop_buffer(ptq, buffer);
332 err = cb(&b, cb_data);
337 if (buffer != old_buffer)
338 intel_pt_lookahead_drop_buffer(ptq, buffer);
339 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
345 * This function assumes data is processed sequentially only.
346 * Must be serialized with respect to intel_pt_lookahead()
348 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
350 struct intel_pt_queue *ptq = data;
351 struct auxtrace_buffer *buffer = ptq->buffer;
352 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
353 struct auxtrace_queue *queue;
361 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
363 buffer = auxtrace_buffer__next(queue, buffer);
366 auxtrace_buffer__drop_data(old_buffer);
371 ptq->buffer = buffer;
373 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
377 if (ptq->step_through_buffers)
382 auxtrace_buffer__drop_data(old_buffer);
383 ptq->old_buffer = buffer;
385 auxtrace_buffer__drop_data(buffer);
386 return intel_pt_get_trace(b, data);
392 struct intel_pt_cache_entry {
393 struct auxtrace_cache_entry entry;
396 enum intel_pt_insn_op op;
397 enum intel_pt_insn_branch branch;
400 char insn[INTEL_PT_INSN_BUF_SZ];
403 static int intel_pt_config_div(const char *var, const char *value, void *data)
408 if (!strcmp(var, "intel-pt.cache-divisor")) {
409 val = strtol(value, NULL, 0);
410 if (val > 0 && val <= INT_MAX)
417 static int intel_pt_cache_divisor(void)
424 perf_config(intel_pt_config_div, &d);
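/*
 * Choose the number of hash-table bits for the per-dso instruction cache,
 * based on the dso data size divided by the configurable
 * "intel-pt.cache-divisor", with an upper limit.
 */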
432 static unsigned int intel_pt_cache_size(struct dso *dso,
433 struct machine *machine)
437 size = dso__data_size(dso, machine);
438 size /= intel_pt_cache_divisor();
441 if (size > (1 << 21))
443 return 32 - __builtin_clz(size);
446 static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
447 struct machine *machine)
449 struct auxtrace_cache *c;
452 if (dso->auxtrace_cache)
453 return dso->auxtrace_cache;
455 bits = intel_pt_cache_size(dso, machine);
457 /* Ignoring cache creation failure */
458 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
460 dso->auxtrace_cache = c;
465 static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
466 u64 offset, u64 insn_cnt, u64 byte_cnt,
467 struct intel_pt_insn *intel_pt_insn)
469 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
470 struct intel_pt_cache_entry *e;
476 e = auxtrace_cache__alloc_entry(c);
480 e->insn_cnt = insn_cnt;
481 e->byte_cnt = byte_cnt;
482 e->op = intel_pt_insn->op;
483 e->branch = intel_pt_insn->branch;
484 e->length = intel_pt_insn->length;
485 e->rel = intel_pt_insn->rel;
486 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
488 err = auxtrace_cache__add(c, offset, &e->entry);
490 auxtrace_cache__free_entry(c, e);
495 static struct intel_pt_cache_entry *
496 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
498 struct auxtrace_cache *c = intel_pt_cache(dso, machine);
503 return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
506 static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
508 return ip >= pt->kernel_start ?
509 PERF_RECORD_MISC_KERNEL :
510 PERF_RECORD_MISC_USER;
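/*
 * Decoder callback to walk object code: decode instructions one at a time
 * starting at *ip until a branch is found, 'to_ip' is reached, or
 * 'max_insn_cnt' instructions have been walked. Results are cached per dso,
 * keyed by file offset, to avoid repeatedly decoding the same code.
 */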
513 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
514 uint64_t *insn_cnt_ptr, uint64_t *ip,
515 uint64_t to_ip, uint64_t max_insn_cnt,
518 struct intel_pt_queue *ptq = data;
519 struct machine *machine = ptq->pt->machine;
520 struct thread *thread;
521 struct addr_location al;
522 unsigned char buf[INTEL_PT_INSN_BUF_SZ];
526 u64 offset, start_offset, start_ip;
530 intel_pt_insn->length = 0;
532 if (to_ip && *ip == to_ip)
535 cpumode = intel_pt_cpumode(ptq->pt, *ip);
537 thread = ptq->thread;
539 if (cpumode != PERF_RECORD_MISC_KERNEL)
541 thread = ptq->pt->unknown_thread;
545 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
548 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
549 dso__data_status_seen(al.map->dso,
550 DSO_DATA_STATUS_SEEN_ITRACE))
553 offset = al.map->map_ip(al.map, *ip);
555 if (!to_ip && one_map) {
556 struct intel_pt_cache_entry *e;
558 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
560 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
561 *insn_cnt_ptr = e->insn_cnt;
563 intel_pt_insn->op = e->op;
564 intel_pt_insn->branch = e->branch;
565 intel_pt_insn->length = e->length;
566 intel_pt_insn->rel = e->rel;
567 memcpy(intel_pt_insn->buf, e->insn,
568 INTEL_PT_INSN_BUF_SZ);
569 intel_pt_log_insn_no_data(intel_pt_insn, *ip);
574 start_offset = offset;
577 /* Load maps to ensure dso->is_64_bit has been updated */
580 x86_64 = al.map->dso->is_64_bit;
583 len = dso__data_read_offset(al.map->dso, machine,
585 INTEL_PT_INSN_BUF_SZ);
589 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
592 intel_pt_log_insn(intel_pt_insn, *ip);
596 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
599 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
602 *ip += intel_pt_insn->length;
604 if (to_ip && *ip == to_ip)
607 if (*ip >= al.map->end)
610 offset += intel_pt_insn->length;
615 *insn_cnt_ptr = insn_cnt;
621 * Didn't look up in the 'to_ip' case, so do it now to prevent duplicate
625 struct intel_pt_cache_entry *e;
627 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
632 /* Ignore cache errors */
633 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
634 *ip - start_ip, intel_pt_insn);
639 *insn_cnt_ptr = insn_cnt;
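/*
 * Check a TIP.PGD (trace disable) destination against the address filters to
 * determine whether tracing stopped due to a filter or trace-stop region.
 */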
643 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
644 uint64_t offset, const char *filename)
646 struct addr_filter *filt;
647 bool have_filter = false;
648 bool hit_tracestop = false;
649 bool hit_filter = false;
651 list_for_each_entry(filt, &pt->filts.head, list) {
655 if ((filename && !filt->filename) ||
656 (!filename && filt->filename) ||
657 (filename && strcmp(filename, filt->filename)))
660 if (!(offset >= filt->addr && offset < filt->addr + filt->size))
663 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
664 ip, offset, filename ? filename : "[kernel]",
665 filt->start ? "filter" : "stop",
666 filt->addr, filt->size);
671 hit_tracestop = true;
674 if (!hit_tracestop && !hit_filter)
675 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
676 ip, offset, filename ? filename : "[kernel]");
678 return hit_tracestop || (have_filter && !hit_filter);
681 static int __intel_pt_pgd_ip(uint64_t ip, void *data)
683 struct intel_pt_queue *ptq = data;
684 struct thread *thread;
685 struct addr_location al;
689 if (ip >= ptq->pt->kernel_start)
690 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
692 cpumode = PERF_RECORD_MISC_USER;
694 thread = ptq->thread;
698 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
701 offset = al.map->map_ip(al.map, ip);
703 return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
704 al.map->dso->long_name);
707 static bool intel_pt_pgd_ip(uint64_t ip, void *data)
709 return __intel_pt_pgd_ip(ip, data) > 0;
712 static bool intel_pt_get_config(struct intel_pt *pt,
713 struct perf_event_attr *attr, u64 *config)
715 if (attr->type == pt->pmu_type) {
717 *config = attr->config;
724 static bool intel_pt_exclude_kernel(struct intel_pt *pt)
726 struct perf_evsel *evsel;
728 evlist__for_each_entry(pt->session->evlist, evsel) {
729 if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
730 !evsel->attr.exclude_kernel)
736 static bool intel_pt_return_compression(struct intel_pt *pt)
738 struct perf_evsel *evsel;
741 if (!pt->noretcomp_bit)
744 evlist__for_each_entry(pt->session->evlist, evsel) {
745 if (intel_pt_get_config(pt, &evsel->attr, &config) &&
746 (config & pt->noretcomp_bit))
752 static bool intel_pt_branch_enable(struct intel_pt *pt)
754 struct perf_evsel *evsel;
757 evlist__for_each_entry(pt->session->evlist, evsel) {
758 if (intel_pt_get_config(pt, &evsel->attr, &config) &&
759 (config & 1) && !(config & 0x2000))
765 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
767 struct perf_evsel *evsel;
771 if (!pt->mtc_freq_bits)
774 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
777 evlist__for_each_entry(pt->session->evlist, evsel) {
778 if (intel_pt_get_config(pt, &evsel->attr, &config))
779 return (config & pt->mtc_freq_bits) >> shift;
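/*
 * "Timeless" decoding means timestamps cannot be used: either TSC was not
 * enabled in the trace or the sideband events do not carry time.
 */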
784 static bool intel_pt_timeless_decoding(struct intel_pt *pt)
786 struct perf_evsel *evsel;
787 bool timeless_decoding = true;
790 if (!pt->tsc_bit || !pt->cap_user_time_zero)
793 evlist__for_each_entry(pt->session->evlist, evsel) {
794 if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
796 if (intel_pt_get_config(pt, &evsel->attr, &config)) {
797 if (config & pt->tsc_bit)
798 timeless_decoding = false;
803 return timeless_decoding;
806 static bool intel_pt_tracing_kernel(struct intel_pt *pt)
808 struct perf_evsel *evsel;
810 evlist__for_each_entry(pt->session->evlist, evsel) {
811 if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
812 !evsel->attr.exclude_kernel)
818 static bool intel_pt_have_tsc(struct intel_pt *pt)
820 struct perf_evsel *evsel;
821 bool have_tsc = false;
827 evlist__for_each_entry(pt->session->evlist, evsel) {
828 if (intel_pt_get_config(pt, &evsel->attr, &config)) {
829 if (config & pt->tsc_bit)
838 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
842 quot = ns / pt->tc.time_mult;
843 rem = ns % pt->tc.time_mult;
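/*
 * Equivalent to (ns << time_shift) / time_mult, computed as quotient and
 * remainder to avoid overflowing 64 bits.
 */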
844 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
848 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
849 unsigned int queue_nr)
851 struct intel_pt_params params = { .get_trace = 0, };
852 struct perf_env *env = pt->machine->env;
853 struct intel_pt_queue *ptq;
855 ptq = zalloc(sizeof(struct intel_pt_queue));
859 if (pt->synth_opts.callchain) {
860 size_t sz = sizeof(struct ip_callchain);
862 /* Add 1 to callchain_sz for callchain context */
863 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
864 ptq->chain = zalloc(sz);
869 if (pt->synth_opts.last_branch) {
870 size_t sz = sizeof(struct branch_stack);
872 sz += pt->synth_opts.last_branch_sz *
873 sizeof(struct branch_entry);
874 ptq->last_branch = zalloc(sz);
875 if (!ptq->last_branch)
877 ptq->last_branch_rb = zalloc(sz);
878 if (!ptq->last_branch_rb)
882 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
887 ptq->queue_nr = queue_nr;
888 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
894 params.get_trace = intel_pt_get_trace;
895 params.walk_insn = intel_pt_walk_next_insn;
896 params.lookahead = intel_pt_lookahead;
898 params.return_compression = intel_pt_return_compression(pt);
899 params.branch_enable = intel_pt_branch_enable(pt);
900 params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
901 params.mtc_period = intel_pt_mtc_period(pt);
902 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
903 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
905 if (pt->filts.cnt > 0)
906 params.pgd_ip = intel_pt_pgd_ip;
908 if (pt->synth_opts.instructions) {
909 if (pt->synth_opts.period) {
910 switch (pt->synth_opts.period_type) {
911 case PERF_ITRACE_PERIOD_INSTRUCTIONS:
913 INTEL_PT_PERIOD_INSTRUCTIONS;
914 params.period = pt->synth_opts.period;
916 case PERF_ITRACE_PERIOD_TICKS:
917 params.period_type = INTEL_PT_PERIOD_TICKS;
918 params.period = pt->synth_opts.period;
920 case PERF_ITRACE_PERIOD_NANOSECS:
921 params.period_type = INTEL_PT_PERIOD_TICKS;
922 params.period = intel_pt_ns_to_ticks(pt,
923 pt->synth_opts.period);
930 if (!params.period) {
931 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
936 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
937 params.flags |= INTEL_PT_FUP_WITH_NLIP;
939 ptq->decoder = intel_pt_decoder_new(&params);
946 zfree(&ptq->event_buf);
947 zfree(&ptq->last_branch);
948 zfree(&ptq->last_branch_rb);
954 static void intel_pt_free_queue(void *priv)
956 struct intel_pt_queue *ptq = priv;
960 thread__zput(ptq->thread);
961 intel_pt_decoder_free(ptq->decoder);
962 zfree(&ptq->event_buf);
963 zfree(&ptq->last_branch);
964 zfree(&ptq->last_branch_rb);
969 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
970 struct auxtrace_queue *queue)
972 struct intel_pt_queue *ptq = queue->priv;
974 if (queue->tid == -1 || pt->have_sched_switch) {
975 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
976 thread__zput(ptq->thread);
979 if (!ptq->thread && ptq->tid != -1)
980 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
983 ptq->pid = ptq->thread->pid_;
984 if (queue->cpu == -1)
985 ptq->cpu = ptq->thread->cpu;
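/*
 * Translate the decoder state for the current branch into perf sample flags
 * (branch type, async/interrupt, tx abort, trace begin/end).
 */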
989 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
991 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
992 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
993 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
994 if (ptq->state->to_ip)
995 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
997 PERF_IP_FLAG_INTERRUPT;
999 ptq->flags = PERF_IP_FLAG_BRANCH |
1000 PERF_IP_FLAG_TRACE_END;
1003 if (ptq->state->from_ip)
1004 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1006 ptq->flags = PERF_IP_FLAG_BRANCH |
1007 PERF_IP_FLAG_TRACE_BEGIN;
1008 if (ptq->state->flags & INTEL_PT_IN_TX)
1009 ptq->flags |= PERF_IP_FLAG_IN_TX;
1010 ptq->insn_len = ptq->state->insn_len;
1011 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1014 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1015 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1016 if (ptq->state->type & INTEL_PT_TRACE_END)
1017 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1020 static void intel_pt_setup_time_range(struct intel_pt *pt,
1021 struct intel_pt_queue *ptq)
1026 ptq->sel_timestamp = pt->time_ranges[0].start;
1029 if (ptq->sel_timestamp) {
1030 ptq->sel_start = true;
1032 ptq->sel_timestamp = pt->time_ranges[0].end;
1033 ptq->sel_start = false;
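/*
 * Set up a single queue: allocate it on first use and, unless decoding is
 * timeless, decode far enough to get an initial timestamp so the queue can
 * be placed on the processing heap.
 */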
1037 static int intel_pt_setup_queue(struct intel_pt *pt,
1038 struct auxtrace_queue *queue,
1039 unsigned int queue_nr)
1041 struct intel_pt_queue *ptq = queue->priv;
1043 if (list_empty(&queue->head))
1047 ptq = intel_pt_alloc_queue(pt, queue_nr);
1052 if (queue->cpu != -1)
1053 ptq->cpu = queue->cpu;
1054 ptq->tid = queue->tid;
1056 ptq->cbr_seen = UINT_MAX;
1058 if (pt->sampling_mode && !pt->snapshot_mode &&
1059 pt->timeless_decoding)
1060 ptq->step_through_buffers = true;
1062 ptq->sync_switch = pt->sync_switch;
1064 intel_pt_setup_time_range(pt, ptq);
1067 if (!ptq->on_heap &&
1068 (!ptq->sync_switch ||
1069 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1070 const struct intel_pt_state *state;
1073 if (pt->timeless_decoding)
1076 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1077 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1078 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1080 if (ptq->sel_start && ptq->sel_timestamp) {
1081 ret = intel_pt_fast_forward(ptq->decoder,
1082 ptq->sel_timestamp);
1088 state = intel_pt_decode(ptq->decoder);
1090 if (state->err == INTEL_PT_ERR_NODATA) {
1091 intel_pt_log("queue %u has no timestamp\n",
1097 if (state->timestamp)
1101 ptq->timestamp = state->timestamp;
1102 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1103 queue_nr, ptq->timestamp);
1105 ptq->have_sample = true;
1106 if (ptq->sel_start && ptq->sel_timestamp &&
1107 ptq->timestamp < ptq->sel_timestamp)
1108 ptq->have_sample = false;
1109 intel_pt_sample_flags(ptq);
1110 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1113 ptq->on_heap = true;
1119 static int intel_pt_setup_queues(struct intel_pt *pt)
1124 for (i = 0; i < pt->queues.nr_queues; i++) {
1125 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
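/*
 * Copy the last-branch ring buffer into a linear branch stack, newest entry
 * first, unrolling at the ring buffer's wrap point.
 */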
1132 static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
1134 struct branch_stack *bs_src = ptq->last_branch_rb;
1135 struct branch_stack *bs_dst = ptq->last_branch;
1138 bs_dst->nr = bs_src->nr;
1143 nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
1144 memcpy(&bs_dst->entries[0],
1145 &bs_src->entries[ptq->last_branch_pos],
1146 sizeof(struct branch_entry) * nr);
1148 if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
1149 memcpy(&bs_dst->entries[nr],
1150 &bs_src->entries[0],
1151 sizeof(struct branch_entry) * ptq->last_branch_pos);
1155 static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
1157 ptq->last_branch_pos = 0;
1158 ptq->last_branch_rb->nr = 0;
1161 static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
1163 const struct intel_pt_state *state = ptq->state;
1164 struct branch_stack *bs = ptq->last_branch_rb;
1165 struct branch_entry *be;
1167 if (!ptq->last_branch_pos)
1168 ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;
1170 ptq->last_branch_pos -= 1;
1172 be = &bs->entries[ptq->last_branch_pos];
1173 be->from = state->from_ip;
1174 be->to = state->to_ip;
1175 be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
1176 be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
1177 /* No support for mispredict */
1178 be->flags.mispred = ptq->pt->mispred_all;
1180 if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
1184 static inline bool intel_pt_skip_event(struct intel_pt *pt)
1186 return pt->synth_opts.initial_skip &&
1187 pt->num_events++ < pt->synth_opts.initial_skip;
1191 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1192 * Also ensure CBR is the first non-skipped event by allowing for 4 more samples
1193 * from this decoder state.
1195 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1197 return pt->synth_opts.initial_skip &&
1198 pt->num_events + 4 < pt->synth_opts.initial_skip;
1201 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1202 union perf_event *event,
1203 struct perf_sample *sample)
1205 event->sample.header.type = PERF_RECORD_SAMPLE;
1206 event->sample.header.size = sizeof(struct perf_event_header);
1208 sample->pid = ptq->pid;
1209 sample->tid = ptq->tid;
1210 sample->cpu = ptq->cpu;
1211 sample->insn_len = ptq->insn_len;
1212 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1215 static void intel_pt_prep_b_sample(struct intel_pt *pt,
1216 struct intel_pt_queue *ptq,
1217 union perf_event *event,
1218 struct perf_sample *sample)
1220 intel_pt_prep_a_sample(ptq, event, sample);
1222 if (!pt->timeless_decoding)
1223 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1225 sample->ip = ptq->state->from_ip;
1226 sample->cpumode = intel_pt_cpumode(pt, sample->ip);
1227 sample->addr = ptq->state->to_ip;
1229 sample->flags = ptq->flags;
1231 event->sample.header.misc = sample->cpumode;
1234 static int intel_pt_inject_event(union perf_event *event,
1235 struct perf_sample *sample, u64 type)
1237 event->header.size = perf_event__sample_event_size(sample, type, 0);
1238 return perf_event__synthesize_sample(event, type, 0, sample);
1241 static inline int intel_pt_opt_inject(struct intel_pt *pt,
1242 union perf_event *event,
1243 struct perf_sample *sample, u64 type)
1245 if (!pt->synth_opts.inject)
1248 return intel_pt_inject_event(event, sample, type);
1251 static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
1252 union perf_event *event,
1253 struct perf_sample *sample, u64 type)
1257 ret = intel_pt_opt_inject(pt, event, sample, type);
1261 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1263 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1268 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1270 struct intel_pt *pt = ptq->pt;
1271 union perf_event *event = ptq->event_buf;
1272 struct perf_sample sample = { .ip = 0, };
1273 struct dummy_branch_stack {
1275 struct branch_entry entries;
1278 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1281 if (intel_pt_skip_event(pt))
1284 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1286 sample.id = ptq->pt->branches_id;
1287 sample.stream_id = ptq->pt->branches_id;
1290 * perf report cannot handle events without a branch stack when using
1291 * SORT_MODE__BRANCH, so make a dummy one.
1293 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1294 dummy_bs = (struct dummy_branch_stack){
1301 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1304 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1305 if (sample.cyc_cnt) {
1306 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1307 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1308 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1311 return intel_pt_deliver_synth_b_event(pt, event, &sample,
1312 pt->branches_sample_type);
1315 static void intel_pt_prep_sample(struct intel_pt *pt,
1316 struct intel_pt_queue *ptq,
1317 union perf_event *event,
1318 struct perf_sample *sample)
1320 intel_pt_prep_b_sample(pt, ptq, event, sample);
1322 if (pt->synth_opts.callchain) {
1323 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1324 pt->synth_opts.callchain_sz + 1,
1325 sample->ip, pt->kernel_start);
1326 sample->callchain = ptq->chain;
1329 if (pt->synth_opts.last_branch) {
1330 intel_pt_copy_last_branch_rb(ptq);
1331 sample->branch_stack = ptq->last_branch;
1335 static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
1336 struct intel_pt_queue *ptq,
1337 union perf_event *event,
1338 struct perf_sample *sample,
1343 ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);
1345 if (pt->synth_opts.last_branch)
1346 intel_pt_reset_last_branch_rb(ptq);
1351 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1353 struct intel_pt *pt = ptq->pt;
1354 union perf_event *event = ptq->event_buf;
1355 struct perf_sample sample = { .ip = 0, };
1357 if (intel_pt_skip_event(pt))
1360 intel_pt_prep_sample(pt, ptq, event, &sample);
1362 sample.id = ptq->pt->instructions_id;
1363 sample.stream_id = ptq->pt->instructions_id;
1364 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1366 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1367 if (sample.cyc_cnt) {
1368 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1369 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1370 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1373 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1375 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1376 pt->instructions_sample_type);
1379 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1381 struct intel_pt *pt = ptq->pt;
1382 union perf_event *event = ptq->event_buf;
1383 struct perf_sample sample = { .ip = 0, };
1385 if (intel_pt_skip_event(pt))
1388 intel_pt_prep_sample(pt, ptq, event, &sample);
1390 sample.id = ptq->pt->transactions_id;
1391 sample.stream_id = ptq->pt->transactions_id;
1393 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1394 pt->transactions_sample_type);
1397 static void intel_pt_prep_p_sample(struct intel_pt *pt,
1398 struct intel_pt_queue *ptq,
1399 union perf_event *event,
1400 struct perf_sample *sample)
1402 intel_pt_prep_sample(pt, ptq, event, sample);
1405 * Zero IP is used to mean "trace start" but that is not the case for
1406 * power or PTWRITE events with no IP, so clear the flags.
1412 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1414 struct intel_pt *pt = ptq->pt;
1415 union perf_event *event = ptq->event_buf;
1416 struct perf_sample sample = { .ip = 0, };
1417 struct perf_synth_intel_ptwrite raw;
1419 if (intel_pt_skip_event(pt))
1422 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1424 sample.id = ptq->pt->ptwrites_id;
1425 sample.stream_id = ptq->pt->ptwrites_id;
1428 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1429 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1431 sample.raw_size = perf_synth__raw_size(raw);
1432 sample.raw_data = perf_synth__raw_data(&raw);
1434 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1435 pt->ptwrites_sample_type);
1438 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1440 struct intel_pt *pt = ptq->pt;
1441 union perf_event *event = ptq->event_buf;
1442 struct perf_sample sample = { .ip = 0, };
1443 struct perf_synth_intel_cbr raw;
1446 if (intel_pt_skip_cbr_event(pt))
1449 ptq->cbr_seen = ptq->state->cbr;
1451 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1453 sample.id = ptq->pt->cbr_id;
1454 sample.stream_id = ptq->pt->cbr_id;
1456 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1457 raw.flags = cpu_to_le32(flags);
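/* cbr2khz scales the core-to-bus ratio (CBR) to a core frequency in kHz */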
1458 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1461 sample.raw_size = perf_synth__raw_size(raw);
1462 sample.raw_data = perf_synth__raw_data(&raw);
1464 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1465 pt->pwr_events_sample_type);
1468 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1470 struct intel_pt *pt = ptq->pt;
1471 union perf_event *event = ptq->event_buf;
1472 struct perf_sample sample = { .ip = 0, };
1473 struct perf_synth_intel_mwait raw;
1475 if (intel_pt_skip_event(pt))
1478 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1480 sample.id = ptq->pt->mwait_id;
1481 sample.stream_id = ptq->pt->mwait_id;
1484 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1486 sample.raw_size = perf_synth__raw_size(raw);
1487 sample.raw_data = perf_synth__raw_data(&raw);
1489 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1490 pt->pwr_events_sample_type);
1493 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1495 struct intel_pt *pt = ptq->pt;
1496 union perf_event *event = ptq->event_buf;
1497 struct perf_sample sample = { .ip = 0, };
1498 struct perf_synth_intel_pwre raw;
1500 if (intel_pt_skip_event(pt))
1503 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1505 sample.id = ptq->pt->pwre_id;
1506 sample.stream_id = ptq->pt->pwre_id;
1509 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1511 sample.raw_size = perf_synth__raw_size(raw);
1512 sample.raw_data = perf_synth__raw_data(&raw);
1514 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1515 pt->pwr_events_sample_type);
1518 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1520 struct intel_pt *pt = ptq->pt;
1521 union perf_event *event = ptq->event_buf;
1522 struct perf_sample sample = { .ip = 0, };
1523 struct perf_synth_intel_exstop raw;
1525 if (intel_pt_skip_event(pt))
1528 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1530 sample.id = ptq->pt->exstop_id;
1531 sample.stream_id = ptq->pt->exstop_id;
1534 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1536 sample.raw_size = perf_synth__raw_size(raw);
1537 sample.raw_data = perf_synth__raw_data(&raw);
1539 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1540 pt->pwr_events_sample_type);
1543 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1545 struct intel_pt *pt = ptq->pt;
1546 union perf_event *event = ptq->event_buf;
1547 struct perf_sample sample = { .ip = 0, };
1548 struct perf_synth_intel_pwrx raw;
1550 if (intel_pt_skip_event(pt))
1553 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1555 sample.id = ptq->pt->pwrx_id;
1556 sample.stream_id = ptq->pt->pwrx_id;
1559 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1561 sample.raw_size = perf_synth__raw_size(raw);
1562 sample.raw_data = perf_synth__raw_data(&raw);
1564 return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
1565 pt->pwr_events_sample_type);
1569 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
1570 * intel_pt_add_gp_regs().
1572 static const int pebs_gp_regs[] = {
1573 [PERF_REG_X86_FLAGS] = 1,
1574 [PERF_REG_X86_IP] = 2,
1575 [PERF_REG_X86_AX] = 3,
1576 [PERF_REG_X86_CX] = 4,
1577 [PERF_REG_X86_DX] = 5,
1578 [PERF_REG_X86_BX] = 6,
1579 [PERF_REG_X86_SP] = 7,
1580 [PERF_REG_X86_BP] = 8,
1581 [PERF_REG_X86_SI] = 9,
1582 [PERF_REG_X86_DI] = 10,
1583 [PERF_REG_X86_R8] = 11,
1584 [PERF_REG_X86_R9] = 12,
1585 [PERF_REG_X86_R10] = 13,
1586 [PERF_REG_X86_R11] = 14,
1587 [PERF_REG_X86_R12] = 15,
1588 [PERF_REG_X86_R13] = 16,
1589 [PERF_REG_X86_R14] = 17,
1590 [PERF_REG_X86_R15] = 18,
1593 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1594 const struct intel_pt_blk_items *items,
1597 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1598 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1602 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1603 /* Get the PEBS gp_regs array index */
1604 int n = pebs_gp_regs[i] - 1;
1609 * Add only registers that were requested (i.e. 'regs_mask') and
1610 * that were provided (i.e. 'mask'), and update the resulting
1611 * mask (i.e. 'intr_regs->mask') accordingly.
1613 if (mask & 1 << n && regs_mask & bit) {
1614 intr_regs->mask |= bit;
1615 *pos++ = gp_regs[n];
1622 #ifndef PERF_REG_X86_XMM0
1623 #define PERF_REG_X86_XMM0 32
1626 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1627 const struct intel_pt_blk_items *items,
1630 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1631 const u64 *xmm = items->xmm;
1634 * If there are any XMM registers, then there should be all of them.
1635 * Nevertheless, follow the logic to add only registers that were
1636 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1637 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1639 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1641 for (; mask; mask >>= 1, xmm++) {
1647 #define LBR_INFO_MISPRED (1ULL << 63)
1648 #define LBR_INFO_IN_TX (1ULL << 62)
1649 #define LBR_INFO_ABORT (1ULL << 61)
1650 #define LBR_INFO_CYCLES 0xffff
1652 /* Refer kernel's intel_pmu_store_pebs_lbrs() */
1653 static u64 intel_pt_lbr_flags(u64 info)
1656 struct branch_flags flags;
1660 .mispred = !!(info & LBR_INFO_MISPRED),
1661 .predicted = !(info & LBR_INFO_MISPRED),
1662 .in_tx = !!(info & LBR_INFO_IN_TX),
1663 .abort = !!(info & LBR_INFO_ABORT),
1664 .cycles = info & LBR_INFO_CYCLES,
1671 static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1672 const struct intel_pt_blk_items *items)
1679 to = &br_stack->entries[0].from;
1681 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1682 u32 mask = items->mask[i];
1683 const u64 *from = items->val[i];
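/*
 * Each LBR entry is 3 consecutive values (from, to, info), so the mask is
 * consumed 3 bits at a time.
 */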
1685 for (; mask; mask >>= 3, from += 3) {
1686 if ((mask & 7) == 7) {
1689 *to++ = intel_pt_lbr_flags(from[2]);
1696 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1697 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3)
1699 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
1701 const struct intel_pt_blk_items *items = &ptq->state->items;
1702 struct perf_sample sample = { .ip = 0, };
1703 union perf_event *event = ptq->event_buf;
1704 struct intel_pt *pt = ptq->pt;
1705 struct perf_evsel *evsel = pt->pebs_evsel;
1706 u64 sample_type = evsel->attr.sample_type;
1707 u64 id = evsel->id[0];
1710 if (intel_pt_skip_event(pt))
1713 intel_pt_prep_a_sample(ptq, event, &sample);
1716 sample.stream_id = id;
1718 if (!evsel->attr.freq)
1719 sample.period = evsel->attr.sample_period;
1721 /* No support for non-zero CS base */
1723 sample.ip = items->ip;
1724 else if (items->has_rip)
1725 sample.ip = items->rip;
1727 sample.ip = ptq->state->from_ip;
1729 /* No support for guest mode at this time */
1730 cpumode = sample.ip < ptq->pt->kernel_start ?
1731 PERF_RECORD_MISC_USER :
1732 PERF_RECORD_MISC_KERNEL;
1734 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
1736 sample.cpumode = cpumode;
1738 if (sample_type & PERF_SAMPLE_TIME) {
1741 if (items->has_timestamp)
1742 timestamp = items->timestamp;
1743 else if (!pt->timeless_decoding)
1744 timestamp = ptq->timestamp;
1746 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
1749 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
1750 pt->synth_opts.callchain) {
1751 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1752 pt->synth_opts.callchain_sz, sample.ip,
1754 sample.callchain = ptq->chain;
1757 if (sample_type & PERF_SAMPLE_REGS_INTR &&
1758 items->mask[INTEL_PT_GP_REGS_POS]) {
1759 u64 regs[sizeof(sample.intr_regs.mask)];
1760 u64 regs_mask = evsel->attr.sample_regs_intr;
1763 sample.intr_regs.abi = items->is_32_bit ?
1764 PERF_SAMPLE_REGS_ABI_32 :
1765 PERF_SAMPLE_REGS_ABI_64;
1766 sample.intr_regs.regs = regs;
1768 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
1770 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
1773 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1775 struct branch_stack br_stack;
1776 struct branch_entry entries[LBRS_MAX];
1779 if (items->mask[INTEL_PT_LBR_0_POS] ||
1780 items->mask[INTEL_PT_LBR_1_POS] ||
1781 items->mask[INTEL_PT_LBR_2_POS]) {
1782 intel_pt_add_lbrs(&br.br_stack, items);
1783 sample.branch_stack = &br.br_stack;
1784 } else if (pt->synth_opts.last_branch) {
1785 intel_pt_copy_last_branch_rb(ptq);
1786 sample.branch_stack = ptq->last_branch;
1789 sample.branch_stack = &br.br_stack;
1793 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
1794 sample.addr = items->mem_access_address;
1796 if (sample_type & PERF_SAMPLE_WEIGHT) {
1798 * Refer kernel's setup_pebs_adaptive_sample_data() and
1799 * intel_hsw_weight().
1801 if (items->has_mem_access_latency)
1802 sample.weight = items->mem_access_latency;
1803 if (!sample.weight && items->has_tsx_aux_info) {
1804 /* Cycles last block */
1805 sample.weight = (u32)items->tsx_aux_info;
1809 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
1810 u64 ax = items->has_rax ? items->rax : 0;
1811 /* Refer kernel's intel_hsw_transaction() */
1812 u64 txn = (u8)(items->tsx_aux_info >> 32);
1814 /* For RTM XABORTs also log the abort code from AX */
1815 if (txn & PERF_TXN_TRANSACTION && ax & 1)
1816 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1817 sample.transaction = txn;
1820 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, sample_type);
1823 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
1824 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
1826 union perf_event event;
1827 char msg[MAX_AUXTRACE_ERROR_MSG];
1830 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
1832 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
1833 code, cpu, pid, tid, ip, msg, timestamp);
1835 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
1837 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
1843 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
1844 const struct intel_pt_state *state)
1846 struct intel_pt *pt = ptq->pt;
1847 u64 tm = ptq->timestamp;
1849 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
1851 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
1852 ptq->tid, state->from_ip, tm);
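/*
 * Switch the queue to the next tid noted from a context switch, updating the
 * machine's current tid for this CPU and the queue's pid/tid/cpu.
 */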
1855 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
1857 struct auxtrace_queue *queue;
1858 pid_t tid = ptq->next_tid;
1864 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
1866 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
1868 queue = &pt->queues.queue_array[ptq->queue_nr];
1869 intel_pt_set_pid_tid_cpu(pt, queue);
1876 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
1878 struct intel_pt *pt = ptq->pt;
1880 return ip == pt->switch_ip &&
1881 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
1882 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
1883 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
1886 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
1887 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
1889 static int intel_pt_sample(struct intel_pt_queue *ptq)
1891 const struct intel_pt_state *state = ptq->state;
1892 struct intel_pt *pt = ptq->pt;
1895 if (!ptq->have_sample)
1898 ptq->have_sample = false;
1900 if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
1902 * Cycle count and instruction count only go together to create
1903 * a valid IPC ratio when the cycle count changes.
1905 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
1906 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
1910 * Do PEBS first to allow for the possibility that the PEBS timestamp
1911 * precedes the current timestamp.
1913 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
1914 err = intel_pt_synth_pebs_sample(ptq);
1919 if (pt->sample_pwr_events) {
1920 if (ptq->state->cbr != ptq->cbr_seen) {
1921 err = intel_pt_synth_cbr_sample(ptq);
1925 if (state->type & INTEL_PT_PWR_EVT) {
1926 if (state->type & INTEL_PT_MWAIT_OP) {
1927 err = intel_pt_synth_mwait_sample(ptq);
1931 if (state->type & INTEL_PT_PWR_ENTRY) {
1932 err = intel_pt_synth_pwre_sample(ptq);
1936 if (state->type & INTEL_PT_EX_STOP) {
1937 err = intel_pt_synth_exstop_sample(ptq);
1941 if (state->type & INTEL_PT_PWR_EXIT) {
1942 err = intel_pt_synth_pwrx_sample(ptq);
1949 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
1950 err = intel_pt_synth_instruction_sample(ptq);
1955 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
1956 err = intel_pt_synth_transaction_sample(ptq);
1961 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
1962 err = intel_pt_synth_ptwrite_sample(ptq);
1967 if (!(state->type & INTEL_PT_BRANCH))
1970 if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
1971 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
1972 state->to_ip, ptq->insn_len,
1975 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
1977 if (pt->sample_branches) {
1978 err = intel_pt_synth_branch_sample(ptq);
1983 if (pt->synth_opts.last_branch)
1984 intel_pt_update_last_branch_rb(ptq);
1986 if (!ptq->sync_switch)
1989 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
1990 switch (ptq->switch_state) {
1991 case INTEL_PT_SS_NOT_TRACING:
1992 case INTEL_PT_SS_UNKNOWN:
1993 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1994 err = intel_pt_next_tid(pt, ptq);
1997 ptq->switch_state = INTEL_PT_SS_TRACING;
2000 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2003 } else if (!state->to_ip) {
2004 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2005 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2006 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2007 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2008 state->to_ip == pt->ptss_ip &&
2009 (ptq->flags & PERF_IP_FLAG_CALL)) {
2010 ptq->switch_state = INTEL_PT_SS_TRACING;
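/*
 * Find the kernel addresses used to recognize context switches in the trace:
 * __switch_to (switch_ip) and, depending on how sched_switch was recorded,
 * the tracepoint handler or perf's task_sched_out callback (ptss_ip).
 */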
2016 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2018 struct machine *machine = pt->machine;
2020 struct symbol *sym, *start;
2021 u64 ip, switch_ip = 0;
2027 map = machine__kernel_map(machine);
2034 start = dso__first_symbol(map->dso);
2036 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2037 if (sym->binding == STB_GLOBAL &&
2038 !strcmp(sym->name, "__switch_to")) {
2039 ip = map->unmap_ip(map, sym->start);
2040 if (ip >= map->start && ip < map->end) {
2047 if (!switch_ip || !ptss_ip)
2050 if (pt->have_sched_switch == 1)
2051 ptss = "perf_trace_sched_switch";
2053 ptss = "__perf_event_task_sched_out";
2055 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2056 if (!strcmp(sym->name, ptss)) {
2057 ip = map->unmap_ip(map, sym->start);
2058 if (ip >= map->start && ip < map->end) {
2068 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2072 pt->sync_switch = true;
2074 for (i = 0; i < pt->queues.nr_queues; i++) {
2075 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2076 struct intel_pt_queue *ptq = queue->priv;
2079 ptq->sync_switch = true;
2084 * To filter against time ranges, it is only necessary to look at the next start
2087 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2089 struct intel_pt *pt = ptq->pt;
2091 if (ptq->sel_start) {
2092 /* Next time is an end time */
2093 ptq->sel_start = false;
2094 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2096 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2097 /* Next time is a start time */
2098 ptq->sel_start = true;
2100 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2108 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2113 if (ptq->sel_start) {
2114 if (ptq->timestamp >= ptq->sel_timestamp) {
2115 /* After start time, so consider next time */
2116 intel_pt_next_time(ptq);
2117 if (!ptq->sel_timestamp) {
2121 /* Check against end time */
2124 /* Before start time, so fast forward */
2125 ptq->have_sample = false;
2126 if (ptq->sel_timestamp > *ff_timestamp) {
2127 if (ptq->sync_switch) {
2128 intel_pt_next_tid(ptq->pt, ptq);
2129 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2131 *ff_timestamp = ptq->sel_timestamp;
2132 err = intel_pt_fast_forward(ptq->decoder,
2133 ptq->sel_timestamp);
2138 } else if (ptq->timestamp > ptq->sel_timestamp) {
2139 /* After end time, so consider next time */
2140 if (!intel_pt_next_time(ptq)) {
2141 /* No next time range, so stop decoding */
2142 ptq->have_sample = false;
2143 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2146 /* Check against next start time */
2149 /* Before end time */
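/*
 * Decode one queue's trace, synthesizing samples as it goes, until its
 * timestamp catches up with *timestamp so that queues can be processed in
 * time order; *timestamp is updated to the queue's new position.
 */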
2155 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2157 const struct intel_pt_state *state = ptq->state;
2158 struct intel_pt *pt = ptq->pt;
2159 u64 ff_timestamp = 0;
2162 if (!pt->kernel_start) {
2163 pt->kernel_start = machine__kernel_start(pt->machine);
2164 if (pt->per_cpu_mmaps &&
2165 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2166 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2167 !pt->sampling_mode) {
2168 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2169 if (pt->switch_ip) {
2170 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2171 pt->switch_ip, pt->ptss_ip);
2172 intel_pt_enable_sync_switch(pt);
2177 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2178 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2180 err = intel_pt_sample(ptq);
2184 state = intel_pt_decode(ptq->decoder);
2186 if (state->err == INTEL_PT_ERR_NODATA)
2188 if (ptq->sync_switch &&
2189 state->from_ip >= pt->kernel_start) {
2190 ptq->sync_switch = false;
2191 intel_pt_next_tid(pt, ptq);
2193 if (pt->synth_opts.errors) {
2194 err = intel_ptq_synth_error(ptq, state);
2202 ptq->have_sample = true;
2203 intel_pt_sample_flags(ptq);
2205 /* Use estimated TSC upon return to user space */
2207 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2208 state->to_ip && state->to_ip < pt->kernel_start) {
2209 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2210 state->timestamp, state->est_timestamp);
2211 ptq->timestamp = state->est_timestamp;
2212 /* Use estimated TSC in unknown switch state */
2213 } else if (ptq->sync_switch &&
2214 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2215 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2216 ptq->next_tid == -1) {
2217 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2218 state->timestamp, state->est_timestamp);
2219 ptq->timestamp = state->est_timestamp;
2220 } else if (state->timestamp > ptq->timestamp) {
2221 ptq->timestamp = state->timestamp;
2224 if (ptq->sel_timestamp) {
2225 err = intel_pt_time_filter(ptq, &ff_timestamp);
2230 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2231 *timestamp = ptq->timestamp;
2238 static inline int intel_pt_update_queues(struct intel_pt *pt)
2240 if (pt->queues.new_data) {
2241 pt->queues.new_data = false;
2242 return intel_pt_setup_queues(pt);
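/*
 * Process queued trace data in timestamp order, up to the given timestamp,
 * always decoding the queue whose next data has the earliest timestamp (the
 * top of the auxtrace heap).
 */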
2247 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2249 unsigned int queue_nr;
2254 struct auxtrace_queue *queue;
2255 struct intel_pt_queue *ptq;
2257 if (!pt->heap.heap_cnt)
2260 if (pt->heap.heap_array[0].ordinal >= timestamp)
2263 queue_nr = pt->heap.heap_array[0].queue_nr;
2264 queue = &pt->queues.queue_array[queue_nr];
2267 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2268 queue_nr, pt->heap.heap_array[0].ordinal,
2271 auxtrace_heap__pop(&pt->heap);
2273 if (pt->heap.heap_cnt) {
2274 ts = pt->heap.heap_array[0].ordinal + 1;
2281 intel_pt_set_pid_tid_cpu(pt, queue);
2283 ret = intel_pt_run_decoder(ptq, &ts);
2286 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2291 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2295 ptq->on_heap = false;
2302 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2305 struct auxtrace_queues *queues = &pt->queues;
2309 for (i = 0; i < queues->nr_queues; i++) {
2310 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2311 struct intel_pt_queue *ptq = queue->priv;
2313 if (ptq && (tid == -1 || ptq->tid == tid)) {
2315 intel_pt_set_pid_tid_cpu(pt, queue);
2316 intel_pt_run_decoder(ptq, &ts);
2322 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2324 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2325 sample->pid, sample->tid, 0, sample->time);
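/*
 * Find the queue used to decode the given CPU. Queues are normally indexed
 * by CPU number, so start there and search nearby indexes in both directions.
 */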
2328 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2332 if (cpu < 0 || !pt->queues.nr_queues)
2335 if ((unsigned)cpu >= pt->queues.nr_queues)
2336 i = pt->queues.nr_queues - 1;
2340 if (pt->queues.queue_array[i].cpu == cpu)
2341 return pt->queues.queue_array[i].priv;
2343 for (j = 0; i > 0; j++) {
2344 if (pt->queues.queue_array[--i].cpu == cpu)
2345 return pt->queues.queue_array[i].priv;
2348 for (; j < pt->queues.nr_queues; j++) {
2349 if (pt->queues.queue_array[j].cpu == cpu)
2350 return pt->queues.queue_array[j].priv;
2356 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2359 struct intel_pt_queue *ptq;
2362 if (!pt->sync_switch)
2365 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2366 if (!ptq || !ptq->sync_switch)
2369 switch (ptq->switch_state) {
2370 case INTEL_PT_SS_NOT_TRACING:
2372 case INTEL_PT_SS_UNKNOWN:
2373 case INTEL_PT_SS_TRACING:
2374 ptq->next_tid = tid;
2375 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2377 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2378 if (!ptq->on_heap) {
2379 ptq->timestamp = perf_time_to_tsc(timestamp,
2381 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2385 ptq->on_heap = true;
2387 ptq->switch_state = INTEL_PT_SS_TRACING;
2389 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2390 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2401 static int intel_pt_process_switch(struct intel_pt *pt,
2402 struct perf_sample *sample)
2404 struct perf_evsel *evsel;
2408 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
2409 if (evsel != pt->switch_evsel)
2412 tid = perf_evsel__intval(evsel, sample, "next_pid");
2415 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2416 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2419 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2423 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2426 static int intel_pt_context_switch_in(struct intel_pt *pt,
2427 struct perf_sample *sample)
2429 pid_t pid = sample->pid;
2430 pid_t tid = sample->tid;
2431 int cpu = sample->cpu;
2433 if (pt->sync_switch) {
2434 struct intel_pt_queue *ptq;
2436 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2437 if (ptq && ptq->sync_switch) {
2439 switch (ptq->switch_state) {
2440 case INTEL_PT_SS_NOT_TRACING:
2441 case INTEL_PT_SS_UNKNOWN:
2442 case INTEL_PT_SS_TRACING:
2444 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2445 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2446 ptq->switch_state = INTEL_PT_SS_TRACING;
2455 * If the current tid has not been updated yet, ensure it is now that
2456 * a "switch in" event has occurred.
2458 if (machine__get_current_tid(pt->machine, cpu) == tid)
2461 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2464 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2465 struct perf_sample *sample)
2467 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2473 if (pt->have_sched_switch == 3) {
2475 return intel_pt_context_switch_in(pt, sample);
2476 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2477 pr_err("Expecting CPU-wide context switch event\n");
2480 pid = event->context_switch.next_prev_pid;
2481 tid = event->context_switch.next_prev_tid;
2490 pr_err("context_switch event has no tid\n");
2494 intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2495 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
2498 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2502 return machine__set_current_tid(pt->machine, cpu, pid, tid);
2505 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2506 union perf_event *event,
2507 struct perf_sample *sample)
2509 if (!pt->per_cpu_mmaps)
2512 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2513 sample->cpu, event->itrace_start.pid,
2514 event->itrace_start.tid, sample->time,
2515 perf_time_to_tsc(sample->time, &pt->tc));
2517 return machine__set_current_tid(pt->machine, sample->cpu,
2518 event->itrace_start.pid,
2519 event->itrace_start.tid);
2522 static int intel_pt_process_event(struct perf_session *session,
2523 union perf_event *event,
2524 struct perf_sample *sample,
2525 struct perf_tool *tool)
2527 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2535 if (!tool->ordered_events) {
2536 pr_err("Intel Processor Trace requires ordered events\n");
2540 if (sample->time && sample->time != (u64)-1)
2541 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2545 if (timestamp || pt->timeless_decoding) {
2546 err = intel_pt_update_queues(pt);
2551 if (pt->timeless_decoding) {
2552 if (event->header.type == PERF_RECORD_EXIT) {
2553 err = intel_pt_process_timeless_queues(pt,
2557 } else if (timestamp) {
2558 err = intel_pt_process_queues(pt, timestamp);
2563 if (event->header.type == PERF_RECORD_AUX &&
2564 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
2565 pt->synth_opts.errors) {
2566 err = intel_pt_lost(pt, sample);
2571 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
2572 err = intel_pt_process_switch(pt, sample);
2573 else if (event->header.type == PERF_RECORD_ITRACE_START)
2574 err = intel_pt_process_itrace_start(pt, event, sample);
2575 else if (event->header.type == PERF_RECORD_SWITCH ||
2576 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2577 err = intel_pt_context_switch(pt, event, sample);
2579 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2580 event->header.type, sample->cpu, sample->time, timestamp);
2581 intel_pt_log_event(event);
2586 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
2588 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2595 if (!tool->ordered_events)
2598 ret = intel_pt_update_queues(pt);
2602 if (pt->timeless_decoding)
2603 return intel_pt_process_timeless_queues(pt, -1,
2606 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
2609 static void intel_pt_free_events(struct perf_session *session)
2611 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2613 struct auxtrace_queues *queues = &pt->queues;
2616 for (i = 0; i < queues->nr_queues; i++) {
2617 intel_pt_free_queue(queues->queue_array[i].priv);
2618 queues->queue_array[i].priv = NULL;
2620 intel_pt_log_disable();
2621 auxtrace_queues__free(queues);
2624 static void intel_pt_free(struct perf_session *session)
2626 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2629 auxtrace_heap__free(&pt->heap);
2630 intel_pt_free_events(session);
2631 session->auxtrace = NULL;
2632 thread__put(pt->unknown_thread);
2633 addr_filters__exit(&pt->filts);
2635 zfree(&pt->time_ranges);
2639 static int intel_pt_process_auxtrace_event(struct perf_session *session,
2640 union perf_event *event,
2641 struct perf_tool *tool __maybe_unused)
2643 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2646 if (!pt->data_queued) {
2647 struct auxtrace_buffer *buffer;
2649 int fd = perf_data__fd(session->data);
2652 if (perf_data__is_pipe(session->data)) {
2655 data_offset = lseek(fd, 0, SEEK_CUR);
2656 if (data_offset == -1)
2660 err = auxtrace_queues__add_event(&pt->queues, session, event,
2661 data_offset, &buffer);
2665 /* Dump here now that we have copied a piped trace out of the pipe */
2667 if (auxtrace_buffer__get_data(buffer, fd)) {
2668 intel_pt_dump_event(pt, buffer->data,
2670 auxtrace_buffer__put_data(buffer);
2678 struct intel_pt_synth {
2679 struct perf_tool dummy_tool;
2680 struct perf_session *session;
2683 static int intel_pt_event_synth(struct perf_tool *tool,
2684 union perf_event *event,
2685 struct perf_sample *sample __maybe_unused,
2686 struct machine *machine __maybe_unused)
2688 struct intel_pt_synth *intel_pt_synth =
2689 container_of(tool, struct intel_pt_synth, dummy_tool);
2691 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
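/*
 * Synthesize a new event attribute into the session so that the samples
 * Intel PT will generate (branches, instructions, etc.) have a matching
 * evsel with the given id.
 */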
2695 static int intel_pt_synth_event(struct perf_session *session, const char *name,
2696 struct perf_event_attr *attr, u64 id)
2698 struct intel_pt_synth intel_pt_synth;
2701 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
2702 name, id, (u64)attr->sample_type);
2704 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
2705 intel_pt_synth.session = session;
2707 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
2708 &id, intel_pt_event_synth);
2710 pr_err("%s: failed to synthesize '%s' event type\n",
2716 static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
2719 struct perf_evsel *evsel;
2721 evlist__for_each_entry(evlist, evsel) {
2722 if (evsel->id && evsel->id[0] == id) {
2724 zfree(&evsel->name);
2725 evsel->name = strdup(name);
2731 static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
2732 struct perf_evlist *evlist)
2734 struct perf_evsel *evsel;
2736 evlist__for_each_entry(evlist, evsel) {
2737 if (evsel->attr.type == pt->pmu_type && evsel->ids)
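/*
 * Create the synthesized event types requested by the --itrace options.
 * A template attr is built from the Intel PT evsel, and each synthesized
 * type (branches, instructions, transactions, ptwrite, power events) gets
 * its own id derived from the PT evsel's first sample id.
 */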
2744 static int intel_pt_synth_events(struct intel_pt *pt,
2745 struct perf_session *session)
2747 struct perf_evlist *evlist = session->evlist;
2748 struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
2749 struct perf_event_attr attr;
2754 pr_debug("There are no selected events with Intel Processor Trace data\n");
2758 memset(&attr, 0, sizeof(struct perf_event_attr));
2759 attr.size = sizeof(struct perf_event_attr);
2760 attr.type = PERF_TYPE_HARDWARE;
2761 attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
2762 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
2764 if (pt->timeless_decoding)
2765 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
2767 attr.sample_type |= PERF_SAMPLE_TIME;
2768 if (!pt->per_cpu_mmaps)
2769 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
2770 attr.exclude_user = evsel->attr.exclude_user;
2771 attr.exclude_kernel = evsel->attr.exclude_kernel;
2772 attr.exclude_hv = evsel->attr.exclude_hv;
2773 attr.exclude_host = evsel->attr.exclude_host;
2774 attr.exclude_guest = evsel->attr.exclude_guest;
2775 attr.sample_id_all = evsel->attr.sample_id_all;
2776 attr.read_format = evsel->attr.read_format;
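/*
 * Ids for synthesized events are offset well past the ids already in the
 * file, presumably to avoid clashing with them.
 */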
2778 id = evsel->id[0] + 1000000000;
2782 if (pt->synth_opts.branches) {
2783 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
2784 attr.sample_period = 1;
2785 attr.sample_type |= PERF_SAMPLE_ADDR;
2786 err = intel_pt_synth_event(session, "branches", &attr, id);
2789 pt->sample_branches = true;
2790 pt->branches_sample_type = attr.sample_type;
2791 pt->branches_id = id;
2793 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
2796 if (pt->synth_opts.callchain)
2797 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
2798 if (pt->synth_opts.last_branch)
2799 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
2801 if (pt->synth_opts.instructions) {
2802 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2803 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
2804 attr.sample_period =
2805 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
2807 attr.sample_period = pt->synth_opts.period;
2808 err = intel_pt_synth_event(session, "instructions", &attr, id);
2811 pt->sample_instructions = true;
2812 pt->instructions_sample_type = attr.sample_type;
2813 pt->instructions_id = id;
2817 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
2818 attr.sample_period = 1;
2820 if (pt->synth_opts.transactions) {
2821 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2822 err = intel_pt_synth_event(session, "transactions", &attr, id);
2825 pt->sample_transactions = true;
2826 pt->transactions_sample_type = attr.sample_type;
2827 pt->transactions_id = id;
2828 intel_pt_set_event_name(evlist, id, "transactions");
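/*
 * The remaining synthesized events (ptwrite and the power events) use the
 * synthetic PMU type and carry their payload as raw sample data.
 */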
2832 attr.type = PERF_TYPE_SYNTH;
2833 attr.sample_type |= PERF_SAMPLE_RAW;
2835 if (pt->synth_opts.ptwrites) {
2836 attr.config = PERF_SYNTH_INTEL_PTWRITE;
2837 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
2840 pt->sample_ptwrites = true;
2841 pt->ptwrites_sample_type = attr.sample_type;
2842 pt->ptwrites_id = id;
2843 intel_pt_set_event_name(evlist, id, "ptwrite");
2847 if (pt->synth_opts.pwr_events) {
2848 pt->sample_pwr_events = true;
2849 pt->pwr_events_sample_type = attr.sample_type;
2851 attr.config = PERF_SYNTH_INTEL_CBR;
2852 err = intel_pt_synth_event(session, "cbr", &attr, id);
2856 intel_pt_set_event_name(evlist, id, "cbr");
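/*
 * CBR is always synthesized with power events, but mwait/pwre/exstop/pwrx
 * are only available if power event tracing was enabled when recording,
 * i.e. the PT pwr_evt config bit (bit 4) was set.
 */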
2860 if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
2861 attr.config = PERF_SYNTH_INTEL_MWAIT;
2862 err = intel_pt_synth_event(session, "mwait", &attr, id);
2866 intel_pt_set_event_name(evlist, id, "mwait");
2869 attr.config = PERF_SYNTH_INTEL_PWRE;
2870 err = intel_pt_synth_event(session, "pwre", &attr, id);
2874 intel_pt_set_event_name(evlist, id, "pwre");
2877 attr.config = PERF_SYNTH_INTEL_EXSTOP;
2878 err = intel_pt_synth_event(session, "exstop", &attr, id);
2882 intel_pt_set_event_name(evlist, id, "exstop");
2885 attr.config = PERF_SYNTH_INTEL_PWRX;
2886 err = intel_pt_synth_event(session, "pwrx", &attr, id);
2890 intel_pt_set_event_name(evlist, id, "pwrx");
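/*
 * have_sched_switch == 1 means the trace was recorded with the
 * sched:sched_switch tracepoint (found below); == 2 means context-switch
 * sideband events were used instead (see intel_pt_find_switch()).
 */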
2897 static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
2899 struct perf_evsel *evsel;
2901 evlist__for_each_entry_reverse(evlist, evsel) {
2902 const char *name = perf_evsel__name(evsel);
2904 if (!strcmp(name, "sched:sched_switch"))
2911 static bool intel_pt_find_switch(struct perf_evlist *evlist)
2913 struct perf_evsel *evsel;
2915 evlist__for_each_entry(evlist, evsel) {
2916 if (evsel->attr.context_switch)
2923 static int intel_pt_perf_config(const char *var, const char *value, void *data)
2925 struct intel_pt *pt = data;
2927 if (!strcmp(var, "intel-pt.mispred-all"))
2928 pt->mispred_all = perf_config_bool(var, value);
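/*
 * perf_time_to_tsc() and tsc_to_perf_time() are not exact inverses because
 * of the shift/mult arithmetic, so the helpers below nudge the TSC value up
 * or down until it converts back inside the requested perf-time range.
 */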
2933 /* Find the least TSC value which converts to the perf time 'ns' or later */
2934 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
2938 tsc = perf_time_to_tsc(ns, &pt->tc);
2941 tm = tsc_to_perf_time(tsc, &pt->tc);
2948 tm = tsc_to_perf_time(++tsc, &pt->tc);
2953 /* Find the greatest TSC value which converts to the perf time 'ns' or earlier */
2954 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
2958 tsc = perf_time_to_tsc(ns, &pt->tc);
2961 tm = tsc_to_perf_time(tsc, &pt->tc);
2968 tm = tsc_to_perf_time(--tsc, &pt->tc);
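/*
 * Convert the --time ranges from perf time (ns) to TSC ranges that the
 * decoder can use to skip trace data outside the requested intervals.
 * Not applicable for timeless decoding.
 */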
2973 static int intel_pt_setup_time_ranges(struct intel_pt *pt,
2974 struct itrace_synth_opts *opts)
2976 struct perf_time_interval *p = opts->ptime_range;
2977 int n = opts->range_num;
2980 if (!n || !p || pt->timeless_decoding)
2983 pt->time_ranges = calloc(n, sizeof(struct range));
2984 if (!pt->time_ranges)
2989 intel_pt_log("%s: %u range(s)\n", __func__, n);
2991 for (i = 0; i < n; i++) {
2992 struct range *r = &pt->time_ranges[i];
2993 u64 ts = p[i].start;
2997 * Take care to ensure the TSC range matches the perf-time range
2998 * when converted back to perf-time.
3000 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3001 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3003 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3005 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3006 i, r->start, r->end);
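/* Format strings for dumping the AUXTRACE_INFO priv[] array, indexed by field position */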
3012 static const char * const intel_pt_info_fmts[] = {
3013 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3014 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3015 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3016 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3017 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3018 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3019 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3020 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3021 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3022 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3023 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3024 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3025 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3026 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3027 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3028 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3031 static void intel_pt_print_info(u64 *arr, int start, int finish)
3038 for (i = start; i <= finish; i++)
3039 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3042 static void intel_pt_print_info_str(const char *name, const char *str)
3047 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
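/*
 * The priv[] array in the AUXTRACE_INFO event has grown over time, so check
 * the header size to see whether a given field is present.  This keeps newer
 * perf able to read perf.data files recorded by older versions.
 */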
3050 static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
3052 return auxtrace_info->header.size >=
3053 sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
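/*
 * Set up Intel PT decoding for the session from the AUXTRACE_INFO event:
 * copy the recorded parameters (PMU type, TSC conversion, sched_switch mode,
 * optional MTC/CYC and filter string), create the decode queues, synthesize
 * the requested event types and register the auxtrace callbacks.
 */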
3056 int intel_pt_process_auxtrace_info(union perf_event *event,
3057 struct perf_session *session)
3059 struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
3060 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3061 struct intel_pt *pt;
3066 if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
3070 pt = zalloc(sizeof(struct intel_pt));
3074 addr_filters__init(&pt->filts);
3076 err = perf_config(intel_pt_perf_config, pt);
3080 err = auxtrace_queues__init(&pt->queues);
3084 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3086 pt->session = session;
3087 pt->machine = &session->machines.host; /* No kvm support */
3088 pt->auxtrace_type = auxtrace_info->type;
3089 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3090 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3091 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3092 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3093 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3094 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3095 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3096 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3097 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3098 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3099 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3100 INTEL_PT_PER_CPU_MMAPS);
3102 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3103 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3104 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3105 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3106 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3107 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3108 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3112 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3113 pt->max_non_turbo_ratio =
3114 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3115 intel_pt_print_info(&auxtrace_info->priv[0],
3116 INTEL_PT_MAX_NONTURBO_RATIO,
3117 INTEL_PT_MAX_NONTURBO_RATIO);
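/*
 * An address filter string, if one was used, follows the fixed priv[]
 * fields: priv[INTEL_PT_FILTER_STR_LEN] holds its length and the NUL
 * terminated string itself (rounded up to 8 bytes) comes immediately after.
 */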
3120 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3121 info_end = (void *)info + auxtrace_info->header.size;
3123 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3126 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3127 intel_pt_print_info(&auxtrace_info->priv[0],
3128 INTEL_PT_FILTER_STR_LEN,
3129 INTEL_PT_FILTER_STR_LEN);
3131 const char *filter = (const char *)info;
3133 len = roundup(len + 1, 8);
3135 if ((void *)info > info_end) {
3136 pr_err("%s: bad filter string length\n", __func__);
3138 goto err_free_queues;
3140 pt->filter = memdup(filter, len);
3143 goto err_free_queues;
3145 if (session->header.needs_swap)
3146 mem_bswap_64(pt->filter, len);
3147 if (pt->filter[len - 1]) {
3148 pr_err("%s: filter string not null terminated\n", __func__);
3150 goto err_free_queues;
3152 err = addr_filters__parse_bare_filter(&pt->filts,
3155 goto err_free_queues;
3157 intel_pt_print_info_str("Filter string", pt->filter);
3160 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3161 if (pt->timeless_decoding && !pt->tc.time_mult)
3162 pt->tc.time_mult = 1;
3163 pt->have_tsc = intel_pt_have_tsc(pt);
3164 pt->sampling_mode = false;
3165 pt->est_tsc = !pt->timeless_decoding;
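/*
 * The "unknown" thread (pid/tid 999999999) is used when decoded trace data
 * cannot be attributed to a real thread.
 */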
3167 pt->unknown_thread = thread__new(999999999, 999999999);
3168 if (!pt->unknown_thread) {
3170 goto err_free_queues;
3174 * Since this thread will not be kept in any rbtree nor in a
3175 * list, initialize its list node so that at thread__put() the
3176 * current thread lifetime assumption is kept and we don't segfault
3177 * at list_del_init().
3179 INIT_LIST_HEAD(&pt->unknown_thread->node);
3181 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3183 goto err_delete_thread;
3184 if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
3186 goto err_delete_thread;
3189 pt->auxtrace.process_event = intel_pt_process_event;
3190 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3191 pt->auxtrace.flush_events = intel_pt_flush;
3192 pt->auxtrace.free_events = intel_pt_free_events;
3193 pt->auxtrace.free = intel_pt_free;
3194 session->auxtrace = &pt->auxtrace;
3199 if (pt->have_sched_switch == 1) {
3200 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3201 if (!pt->switch_evsel) {
3202 pr_err("%s: missing sched_switch event\n", __func__);
3204 goto err_delete_thread;
3206 } else if (pt->have_sched_switch == 2 &&
3207 !intel_pt_find_switch(session->evlist)) {
3208 pr_err("%s: missing context_switch attribute flag\n", __func__);
3210 goto err_delete_thread;
3213 if (session->itrace_synth_opts->set) {
3214 pt->synth_opts = *session->itrace_synth_opts;
3216 itrace_synth_opts__set_default(&pt->synth_opts,
3217 session->itrace_synth_opts->default_no_sample);
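/*
 * In the default case (no --itrace options, not injecting and not
 * "no sample" mode), raw branch samples are turned off in favour of
 * instruction samples with callchains.
 */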
3218 if (!session->itrace_synth_opts->default_no_sample &&
3219 !session->itrace_synth_opts->inject) {
3220 pt->synth_opts.branches = false;
3221 pt->synth_opts.callchain = true;
3223 pt->synth_opts.thread_stack =
3224 session->itrace_synth_opts->thread_stack;
3227 if (pt->synth_opts.log)
3228 intel_pt_log_enable();
3230 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
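/*
 * e.g. a 2 GHz TSC gives a max non-turbo ratio of 20 and cbr2khz of 100000
 * (100 MHz per CBR step).
 */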
3231 if (pt->tc.time_mult) {
3232 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3234 if (!pt->max_non_turbo_ratio)
3235 pt->max_non_turbo_ratio =
3236 (tsc_freq + 50000000) / 100000000;
3237 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3238 intel_pt_log("Maximum non-turbo ratio %u\n",
3239 pt->max_non_turbo_ratio);
3240 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
3243 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3245 goto err_delete_thread;
3247 if (pt->synth_opts.calls)
3248 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3249 PERF_IP_FLAG_TRACE_END;
3250 if (pt->synth_opts.returns)
3251 pt->branches_filter |= PERF_IP_FLAG_RETURN |
3252 PERF_IP_FLAG_TRACE_BEGIN;
3254 if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
3255 symbol_conf.use_callchain = true;
3256 if (callchain_register_param(&callchain_param) < 0) {
3257 symbol_conf.use_callchain = false;
3258 pt->synth_opts.callchain = false;
3262 err = intel_pt_synth_events(pt, session);
3264 goto err_delete_thread;
3266 err = auxtrace_queues__process_index(&pt->queues, session);
3268 goto err_delete_thread;
3270 if (pt->queues.populated)
3271 pt->data_queued = true;
3273 if (pt->timeless_decoding)
3274 pr_debug2("Intel PT decoding without timestamps\n");
3279 thread__zput(pt->unknown_thread);
3281 intel_pt_log_disable();
3282 auxtrace_queues__free(&pt->queues);
3283 session->auxtrace = NULL;
3285 addr_filters__exit(&pt->filts);
3287 zfree(&pt->time_ranges);