1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt_decoder.c: Intel Processor Trace support
 * Copyright (c) 2013-2014, Intel Corporation.
 */
16 #include <linux/compiler.h>
17 #include <linux/string.h>
18 #include <linux/zalloc.h>
20 #include "../auxtrace.h"
22 #include "intel-pt-insn-decoder.h"
23 #include "intel-pt-pkt-decoder.h"
24 #include "intel-pt-decoder.h"
25 #include "intel-pt-log.h"
27 #define BITULL(x) (1ULL << (x))
29 /* IA32_RTIT_CTL MSR bits */
30 #define INTEL_PT_CYC_ENABLE BITULL(1)
31 #define INTEL_PT_CYC_THRESHOLD (BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
32 #define INTEL_PT_CYC_THRESHOLD_SHIFT 19
34 #define INTEL_PT_BLK_SIZE 1024
36 #define BIT63 (((uint64_t)1 << 63))
38 #define SEVEN_BYTES 0xffffffffffffffULL
40 #define NO_VMCS 0xffffffffffULL
42 #define INTEL_PT_RETURN 1
44 /* Maximum number of loops with no packets consumed i.e. stuck in a loop */
45 #define INTEL_PT_MAX_LOOPS 10000
48 struct intel_pt_blk *prev;
49 uint64_t ip[INTEL_PT_BLK_SIZE];
/*
 * Stack of return addresses built from chained blocks.
 * @blk:   top-most block (NULL when empty)
 * @spare: one cached free block to avoid malloc/free churn
 * @pos:   number of used entries in @blk
 */
struct intel_pt_stack {
	struct intel_pt_blk *blk;
	struct intel_pt_blk *spare;
	int pos;
};
/* Conditions reported at most once, tracked via a per-decoder bitmask */
enum intel_pt_p_once {
	INTEL_PT_PRT_ONCE_UNK_VMCS,
	INTEL_PT_PRT_ONCE_ERANGE,
};
/*
 * Decoder packet-walking states. Order matters: intel_pt_sample_time()
 * splits these into "time already applies" vs "time applies after the
 * next branch" groups.
 */
enum intel_pt_pkt_state {
	INTEL_PT_STATE_NO_PSB,
	INTEL_PT_STATE_NO_IP,
	INTEL_PT_STATE_ERR_RESYNC,
	INTEL_PT_STATE_IN_SYNC,
	INTEL_PT_STATE_TNT_CONT,
	INTEL_PT_STATE_TNT,
	INTEL_PT_STATE_TIP,
	INTEL_PT_STATE_TIP_PGD,
	INTEL_PT_STATE_FUP,
	INTEL_PT_STATE_FUP_NO_TIP,
	INTEL_PT_STATE_FUP_IN_PSB,
	INTEL_PT_STATE_RESAMPLE,
	INTEL_PT_STATE_VM_TIME_CORRELATION,
};
79 static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
82 case INTEL_PT_STATE_NO_PSB:
83 case INTEL_PT_STATE_NO_IP:
84 case INTEL_PT_STATE_ERR_RESYNC:
85 case INTEL_PT_STATE_IN_SYNC:
86 case INTEL_PT_STATE_TNT_CONT:
87 case INTEL_PT_STATE_RESAMPLE:
88 case INTEL_PT_STATE_VM_TIME_CORRELATION:
90 case INTEL_PT_STATE_TNT:
91 case INTEL_PT_STATE_TIP:
92 case INTEL_PT_STATE_TIP_PGD:
93 case INTEL_PT_STATE_FUP:
94 case INTEL_PT_STATE_FUP_NO_TIP:
95 case INTEL_PT_STATE_FUP_IN_PSB:
/*
 * Error recovery states. With INTEL_PT_STRICT every decode error drops all
 * state and resynchronizes at the next PSB; otherwise recovery is graduated
 * by error severity. The shown source was missing the #else/#endif, leaving
 * the conditional unbalanced — restored here.
 */
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_NO_PSB
#else
#define INTEL_PT_STATE_ERR1	(decoder->pkt_state)
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_IP
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_ERR_RESYNC
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_IN_SYNC
#endif
/*
 * Main decoder instance state. NOTE(review): this copy of the file has lines
 * elided (the leading numbers are original line numbers); several fields and
 * the closing brace are not visible here — compare against the complete file
 * before relying on the layout.
 */
114 struct intel_pt_decoder {
/* Callbacks supplied by the caller via intel_pt_params */
115 int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
116 int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
117 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
118 uint64_t max_insn_cnt, void *data);
119 bool (*pgd_ip)(uint64_t ip, void *data);
120 int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
121 struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
123 struct intel_pt_state state;
124 const unsigned char *buf;
126 bool return_compression;
/* VM time-correlation mode flags (see intel_pt_decoder_new()) */
137 bool vm_time_correlation;
138 bool vm_tm_corr_dry_run;
139 bool vm_tm_corr_reliable;
140 bool vm_tm_corr_same_buf;
141 bool vm_tm_corr_continuous;
144 enum intel_pt_param_flags flags;
148 uint64_t pip_payload;
/* Timestamp tracking: TSC/CTC derived values, reference points */
150 uint64_t tsc_timestamp;
151 uint64_t ref_timestamp;
152 uint64_t buf_timestamp;
153 uint64_t sample_timestamp;
155 uint64_t ctc_timestamp;
158 uint64_t cyc_ref_timestamp;
159 uint64_t first_timestamp;
160 uint64_t last_reliable_timestamp;
/* TSC:CTC ratio; tsc_ctc_mult is non-zero only when the ratio divides evenly */
165 uint32_t tsc_ctc_ratio_n;
166 uint32_t tsc_ctc_ratio_d;
167 uint32_t tsc_ctc_mult;
169 uint32_t ctc_rem_mask;
171 struct intel_pt_stack stack;
172 enum intel_pt_pkt_state pkt_state;
173 enum intel_pt_pkt_ctx pkt_ctx;
174 enum intel_pt_pkt_ctx prev_pkt_ctx;
175 enum intel_pt_blk_type blk_type;
/* Current packet and in-progress TNT packet being consumed */
177 struct intel_pt_pkt packet;
178 struct intel_pt_pkt tnt;
181 int last_packet_type;
183 unsigned int cbr_seen;
184 unsigned int max_non_turbo_ratio;
185 double max_non_turbo_ratio_fp;
186 double cbr_cyc_to_tsc;
187 double calc_cyc_to_tsc;
188 bool have_calc_cyc_to_tsc;
190 unsigned int insn_bytes;
/* Periodic instruction sampling configuration/accounting */
192 enum intel_pt_period_type period_type;
193 uint64_t tot_insn_cnt;
194 uint64_t period_insn_cnt;
195 uint64_t period_mask;
196 uint64_t period_ticks;
197 uint64_t last_masked_timestamp;
198 uint64_t tot_cyc_cnt;
199 uint64_t sample_tot_cyc_cnt;
200 uint64_t base_cyc_cnt;
201 uint64_t cyc_cnt_timestamp;
203 uint64_t cyc_threshold;
205 bool continuous_period;
207 bool set_fup_tx_flags;
/* Payloads latched at FUP time for deferred event emission */
214 unsigned int fup_tx_flags;
215 unsigned int tx_flags;
216 uint64_t fup_ptw_payload;
217 uint64_t fup_mwait_payload;
218 uint64_t fup_pwre_payload;
219 uint64_t cbr_payload;
220 uint64_t timestamp_insn_cnt;
221 uint64_t sample_insn_cnt;
/* Split-packet staging: remainder of previous buffer + temp assembly area */
227 const unsigned char *next_buf;
229 unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
/*
 * Return the largest power of 2 that is <= x, or 0 for x == 0.
 * The guard for x == 0 is required: without it the shift loop below
 * would never terminate (0 >> 1 stays 0).
 */
static uint64_t intel_pt_lower_power_of_2(uint64_t x)
{
	int i;

	if (!x)
		return 0;

	for (i = 0; x != 1; i++)
		x >>= 1;

	return x << i;
}
/*
 * Print a message to both stderr and the Intel PT debug log.
 * The sampled source lacked va_start()/va_end() — calling vsnprintf()
 * with an uninitialized va_list is undefined behavior; restored here.
 */
static void p_log(const char *fmt, ...)
{
	char buf[512];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	fprintf(stderr, "%s\n", buf);
	intel_pt_log("%s\n", buf);
}
256 static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
257 enum intel_pt_p_once id)
259 uint64_t bit = 1ULL << id;
261 if (decoder->print_once & bit)
263 decoder->print_once |= bit;
267 static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
269 if (!(ctl & INTEL_PT_CYC_ENABLE))
272 return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
275 static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
277 if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
280 period = intel_pt_lower_power_of_2(decoder->period);
281 decoder->period_mask = ~(period - 1);
282 decoder->period_ticks = period;
/*
 * Compute t * n / d without overflowing the intermediate product, by
 * splitting t into quotient and remainder w.r.t. d. Returns 0 when d == 0
 * (the sampled source lacked this guard — division by zero is UB).
 */
static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
{
	if (!d)
		return 0;

	return (t / d) * n + ((t % d) * n) / d;
}
/*
 * Allocate and configure a decoder from the caller-supplied parameters.
 * Returns NULL on invalid params or allocation failure.
 * NOTE(review): this copy has lines elided (leading numbers are original
 * line numbers) — the NULL-return paths, the tsc_ctc_mult=0 else-branch and
 * the final return are among the missing lines; confirm against the full file.
 */
293 struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
295 struct intel_pt_decoder *decoder;
/* get_trace and walk_insn are mandatory callbacks */
297 if (!params->get_trace || !params->walk_insn)
300 decoder = zalloc(sizeof(struct intel_pt_decoder));
304 decoder->get_trace = params->get_trace;
305 decoder->walk_insn = params->walk_insn;
306 decoder->pgd_ip = params->pgd_ip;
307 decoder->lookahead = params->lookahead;
308 decoder->findnew_vmcs_info = params->findnew_vmcs_info;
309 decoder->data = params->data;
310 decoder->return_compression = params->return_compression;
311 decoder->branch_enable = params->branch_enable;
/* "quick" levels: 1 = hop mode (FUP/TIP only), 2 = additionally leap */
312 decoder->hop = params->quick >= 1;
313 decoder->leap = params->quick >= 2;
314 decoder->vm_time_correlation = params->vm_time_correlation;
315 decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
316 decoder->first_timestamp = params->first_timestamp;
317 decoder->last_reliable_timestamp = params->first_timestamp;
319 decoder->flags = params->flags;
321 decoder->ctl = params->ctl;
322 decoder->period = params->period;
323 decoder->period_type = params->period_type;
325 decoder->max_non_turbo_ratio = params->max_non_turbo_ratio;
326 decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
328 decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);
330 intel_pt_setup_period(decoder);
332 decoder->mtc_shift = params->mtc_period;
333 decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;
335 decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
336 decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;
/* A zero numerator makes the ratio meaningless; zero the denominator too */
338 if (!decoder->tsc_ctc_ratio_n)
339 decoder->tsc_ctc_ratio_d = 0;
/* Use a simple multiplier when the ratio divides evenly (avoids multdiv) */
341 if (decoder->tsc_ctc_ratio_d) {
342 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
343 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
344 decoder->tsc_ctc_ratio_d;
348 * A TSC packet can slip past MTC packets so that the timestamp appears
349 * to go backwards. One estimate is that can be up to about 40 CPU
350 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
351 * slippage an order of magnitude more to be on the safe side.
353 decoder->tsc_slip = 0x10000;
355 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
356 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
357 intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
358 intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
359 intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
362 intel_pt_log("Hop mode: decoding FUP and TIPs, but not TNT\n");
367 void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
368 uint64_t first_timestamp)
370 decoder->first_timestamp = first_timestamp;
373 static void intel_pt_pop_blk(struct intel_pt_stack *stack)
375 struct intel_pt_blk *blk = stack->blk;
377 stack->blk = blk->prev;
384 static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
389 intel_pt_pop_blk(stack);
392 stack->pos = INTEL_PT_BLK_SIZE;
394 return stack->blk->ip[--stack->pos];
397 static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
399 struct intel_pt_blk *blk;
405 blk = malloc(sizeof(struct intel_pt_blk));
410 blk->prev = stack->blk;
416 static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
420 if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
421 err = intel_pt_alloc_blk(stack);
426 stack->blk->ip[stack->pos++] = ip;
430 static void intel_pt_clear_stack(struct intel_pt_stack *stack)
433 intel_pt_pop_blk(stack);
437 static void intel_pt_free_stack(struct intel_pt_stack *stack)
439 intel_pt_clear_stack(stack);
441 zfree(&stack->spare);
444 void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
446 intel_pt_free_stack(&decoder->stack);
450 static int intel_pt_ext_err(int code)
454 return INTEL_PT_ERR_NOMEM;
456 return INTEL_PT_ERR_INTERN;
458 return INTEL_PT_ERR_BADPKT;
460 return INTEL_PT_ERR_NODATA;
462 return INTEL_PT_ERR_NOINSN;
464 return INTEL_PT_ERR_MISMAT;
466 return INTEL_PT_ERR_OVR;
468 return INTEL_PT_ERR_LOST;
470 return INTEL_PT_ERR_NELOOP;
472 return INTEL_PT_ERR_UNK;
476 static const char *intel_pt_err_msgs[] = {
477 [INTEL_PT_ERR_NOMEM] = "Memory allocation failed",
478 [INTEL_PT_ERR_INTERN] = "Internal error",
479 [INTEL_PT_ERR_BADPKT] = "Bad packet",
480 [INTEL_PT_ERR_NODATA] = "No more data",
481 [INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
482 [INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
483 [INTEL_PT_ERR_OVR] = "Overflow packet",
484 [INTEL_PT_ERR_LOST] = "Lost trace data",
485 [INTEL_PT_ERR_UNK] = "Unknown error!",
486 [INTEL_PT_ERR_NELOOP] = "Never-ending loop",
489 int intel_pt__strerror(int code, char *buf, size_t buflen)
491 if (code < 1 || code >= INTEL_PT_ERR_MAX)
492 code = INTEL_PT_ERR_UNK;
493 strlcpy(buf, intel_pt_err_msgs[code], buflen);
497 static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
502 switch (packet->count) {
504 ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
508 ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
512 ip = packet->payload;
513 /* Sign-extend 6-byte ip */
514 if (ip & (uint64_t)0x800000000000ULL)
515 ip |= (uint64_t)0xffff000000000000ULL;
518 ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
522 ip = packet->payload;
531 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
533 decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
534 decoder->have_last_ip = true;
537 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
539 intel_pt_set_last_ip(decoder);
540 decoder->ip = decoder->last_ip;
543 static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
545 intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
549 static int intel_pt_bug(struct intel_pt_decoder *decoder)
551 intel_pt_log("ERROR: Internal error\n");
552 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
556 static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
558 decoder->tx_flags = 0;
561 static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
563 decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
566 static inline void intel_pt_update_pip(struct intel_pt_decoder *decoder)
568 decoder->pip_payload = decoder->packet.payload;
571 static inline void intel_pt_update_nr(struct intel_pt_decoder *decoder)
573 decoder->next_nr = decoder->pip_payload & 1;
576 static inline void intel_pt_set_nr(struct intel_pt_decoder *decoder)
578 decoder->nr = decoder->pip_payload & 1;
579 decoder->next_nr = decoder->nr;
/* Consume a PIP packet: latch its payload and apply the NR bit now. */
static inline void intel_pt_set_pip(struct intel_pt_decoder *decoder)
{
	intel_pt_update_pip(decoder);
	intel_pt_set_nr(decoder);
}
588 static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
590 intel_pt_clear_tx_flags(decoder);
591 decoder->have_tma = false;
592 decoder->pkt_len = 1;
593 decoder->pkt_step = 1;
594 intel_pt_decoder_log_packet(decoder);
595 if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
596 intel_pt_log("ERROR: Bad packet\n");
597 decoder->pkt_state = INTEL_PT_STATE_ERR1;
602 static inline void intel_pt_update_sample_time(struct intel_pt_decoder *decoder)
604 decoder->sample_timestamp = decoder->timestamp;
605 decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
608 static void intel_pt_reposition(struct intel_pt_decoder *decoder)
611 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
612 decoder->timestamp = 0;
613 decoder->have_tma = false;
616 static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
618 struct intel_pt_buffer buffer = { .buf = 0, };
621 decoder->pkt_step = 0;
623 intel_pt_log("Getting more data\n");
624 ret = decoder->get_trace(&buffer, decoder->data);
627 decoder->buf = buffer.buf;
628 decoder->len = buffer.len;
630 intel_pt_log("No more data\n");
633 decoder->buf_timestamp = buffer.ref_timestamp;
634 if (!buffer.consecutive || reposition) {
635 intel_pt_reposition(decoder);
636 decoder->ref_timestamp = buffer.ref_timestamp;
637 decoder->state.trace_nr = buffer.trace_nr;
638 decoder->vm_tm_corr_same_buf = false;
639 intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
640 decoder->ref_timestamp);
647 static int intel_pt_get_next_data(struct intel_pt_decoder *decoder,
650 if (!decoder->next_buf)
651 return intel_pt_get_data(decoder, reposition);
653 decoder->buf = decoder->next_buf;
654 decoder->len = decoder->next_len;
655 decoder->next_buf = 0;
656 decoder->next_len = 0;
660 static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
662 unsigned char *buf = decoder->temp_buf;
663 size_t old_len, len, n;
666 old_len = decoder->len;
668 memcpy(buf, decoder->buf, len);
670 ret = intel_pt_get_data(decoder, false);
672 decoder->pos += old_len;
673 return ret < 0 ? ret : -EINVAL;
676 n = INTEL_PT_PKT_MAX_SZ - len;
677 if (n > decoder->len)
679 memcpy(buf + len, decoder->buf, n);
682 decoder->prev_pkt_ctx = decoder->pkt_ctx;
683 ret = intel_pt_get_packet(buf, len, &decoder->packet, &decoder->pkt_ctx);
684 if (ret < (int)old_len) {
685 decoder->next_buf = decoder->buf;
686 decoder->next_len = decoder->len;
688 decoder->len = old_len;
689 return intel_pt_bad_packet(decoder);
692 decoder->next_buf = decoder->buf + (ret - old_len);
693 decoder->next_len = decoder->len - (ret - old_len);
701 struct intel_pt_pkt_info {
702 struct intel_pt_decoder *decoder;
703 struct intel_pt_pkt packet;
706 int last_packet_type;
710 typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info);
712 /* Lookahead packets in current buffer */
713 static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder,
714 intel_pt_pkt_cb_t cb, void *data)
716 struct intel_pt_pkt_info pkt_info;
717 const unsigned char *buf = decoder->buf;
718 enum intel_pt_pkt_ctx pkt_ctx = decoder->pkt_ctx;
719 size_t len = decoder->len;
722 pkt_info.decoder = decoder;
723 pkt_info.pos = decoder->pos;
724 pkt_info.pkt_len = decoder->pkt_step;
725 pkt_info.last_packet_type = decoder->last_packet_type;
726 pkt_info.data = data;
730 pkt_info.pos += pkt_info.pkt_len;
731 buf += pkt_info.pkt_len;
732 len -= pkt_info.pkt_len;
735 return INTEL_PT_NEED_MORE_BYTES;
737 ret = intel_pt_get_packet(buf, len, &pkt_info.packet,
740 return INTEL_PT_NEED_MORE_BYTES;
744 pkt_info.pkt_len = ret;
745 } while (pkt_info.packet.type == INTEL_PT_PAD);
751 pkt_info.last_packet_type = pkt_info.packet.type;
/* Working state for intel_pt_calc_cyc_cb() (CYC-to-TSC ratio estimation). */
struct intel_pt_calc_cyc_to_tsc_info {
	uint64_t	cycle_cnt;	/* accumulated CYC cycles */
	unsigned int	cbr;		/* core-to-bus ratio seen, if any */
	uint32_t	last_mtc;
	uint64_t	ctc_timestamp;
	uint64_t	ctc_delta;
	uint64_t	tsc_timestamp;
	uint64_t	timestamp;
	bool		have_tma;
	bool		fixup_last_mtc;
	bool		from_mtc;
	double		cbr_cyc_to_tsc;	/* CBR-derived ratio for sanity check */
};
/*
 * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower
 * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
 * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
 * packet by copying the missing bits from the current MTC assuming the least
 * difference between the two, and that the current MTC comes after last_mtc.
 */
static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
				    uint32_t *last_mtc)
{
	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
	uint32_t mask = ~(first_missing_bit - 1);

	*last_mtc |= mtc & mask;
	if (*last_mtc >= mtc) {
		/* Current MTC must come after last_mtc: step back one wrap */
		*last_mtc -= first_missing_bit;
		*last_mtc &= 0xff;
	}
}
/*
 * Lookahead callback that estimates TSC ticks per CPU cycle by accumulating
 * CYC packet cycle counts between timestamps derived from MTC/TMA packets.
 * NOTE(review): this copy has lines elided (the leading numbers are original
 * line numbers) — case labels, breaks, braces and several statements are
 * missing; compare against the complete file before editing logic.
 */
789 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
791 struct intel_pt_decoder *decoder = pkt_info->decoder;
792 struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data;
796 uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;
798 switch (pkt_info->packet.type) {
/* Packet types with no timing impact are simply skipped (continue walk) */
800 case INTEL_PT_TIP_PGE:
805 case INTEL_PT_MODE_EXEC:
806 case INTEL_PT_MODE_TSX:
807 case INTEL_PT_PSBEND:
811 case INTEL_PT_PTWRITE:
812 case INTEL_PT_PTWRITE_IP:
816 case INTEL_PT_BEP_IP:
/* MTC: advance the CTC-derived timestamp by the wrapped 8-bit delta */
823 mtc = pkt_info->packet.payload;
824 if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
825 data->fixup_last_mtc = false;
826 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
829 if (mtc > data->last_mtc)
830 mtc_delta = mtc - data->last_mtc;
832 mtc_delta = mtc + 256 - data->last_mtc;
833 data->ctc_delta += mtc_delta << decoder->mtc_shift;
834 data->last_mtc = mtc;
836 if (decoder->tsc_ctc_mult) {
837 timestamp = data->ctc_timestamp +
838 data->ctc_delta * decoder->tsc_ctc_mult;
840 timestamp = data->ctc_timestamp +
841 multdiv(data->ctc_delta,
842 decoder->tsc_ctc_ratio_n,
843 decoder->tsc_ctc_ratio_d);
846 if (timestamp < data->timestamp)
/* Only adopt the new time when not immediately preceded by CYC */
849 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
850 data->timestamp = timestamp;
858 * For now, do not support using TSC packets - refer
859 * intel_pt_calc_cyc_to_tsc().
/* TSC: 7-byte payload, upper byte carried over; tolerate bounded slippage */
863 timestamp = pkt_info->packet.payload |
864 (data->timestamp & (0xffULL << 56));
865 if (data->from_mtc && timestamp < data->timestamp &&
866 data->timestamp - timestamp < decoder->tsc_slip)
868 if (timestamp < data->timestamp)
869 timestamp += (1ULL << 56);
870 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
873 data->tsc_timestamp = timestamp;
874 data->timestamp = timestamp;
/* TMA: anchor the CTC timestamp from TSC minus the fast-counter remainder */
883 if (!decoder->tsc_ctc_ratio_d)
886 ctc = pkt_info->packet.payload;
887 fc = pkt_info->packet.count;
888 ctc_rem = ctc & decoder->ctc_rem_mask;
890 data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
892 data->ctc_timestamp = data->tsc_timestamp - fc;
893 if (decoder->tsc_ctc_mult) {
894 data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
896 data->ctc_timestamp -=
897 multdiv(ctc_rem, decoder->tsc_ctc_ratio_n,
898 decoder->tsc_ctc_ratio_d);
902 data->have_tma = true;
903 data->fixup_last_mtc = true;
/* CYC: accumulate cycles between timestamps */
908 data->cycle_cnt += pkt_info->packet.payload;
/* CBR: derive the sanity-check ratio; abort if the CBR changed mid-walk */
912 cbr = pkt_info->packet.payload;
913 if (data->cbr && data->cbr != cbr)
916 data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
919 case INTEL_PT_TIP_PGD:
920 case INTEL_PT_TRACESTOP:
921 case INTEL_PT_EXSTOP:
922 case INTEL_PT_EXSTOP_IP:
927 case INTEL_PT_BAD: /* Does not happen */
/* Final computation: reject ratios far above the CBR-based estimate */
932 if (!data->cbr && decoder->cbr) {
933 data->cbr = decoder->cbr;
934 data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc;
937 if (!data->cycle_cnt)
940 cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt;
942 if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc &&
943 cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) {
944 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n",
945 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
949 decoder->calc_cyc_to_tsc = cyc_to_tsc;
950 decoder->have_calc_cyc_to_tsc = true;
953 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n",
954 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
956 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt "\n",
957 cyc_to_tsc, pkt_info->pos);
963 static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
966 struct intel_pt_calc_cyc_to_tsc_info data = {
969 .last_mtc = decoder->last_mtc,
970 .ctc_timestamp = decoder->ctc_timestamp,
971 .ctc_delta = decoder->ctc_delta,
972 .tsc_timestamp = decoder->tsc_timestamp,
973 .timestamp = decoder->timestamp,
974 .have_tma = decoder->have_tma,
975 .fixup_last_mtc = decoder->fixup_last_mtc,
976 .from_mtc = from_mtc,
981 * For now, do not support using TSC packets for at least the reasons:
982 * 1) timing might have stopped
983 * 2) TSC packets within PSB+ can slip against CYC packets
988 intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
991 static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
995 decoder->last_packet_type = decoder->packet.type;
998 decoder->pos += decoder->pkt_step;
999 decoder->buf += decoder->pkt_step;
1000 decoder->len -= decoder->pkt_step;
1002 if (!decoder->len) {
1003 ret = intel_pt_get_next_data(decoder, false);
1008 decoder->prev_pkt_ctx = decoder->pkt_ctx;
1009 ret = intel_pt_get_packet(decoder->buf, decoder->len,
1010 &decoder->packet, &decoder->pkt_ctx);
1011 if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
1012 decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
1013 ret = intel_pt_get_split_packet(decoder);
1018 return intel_pt_bad_packet(decoder);
1020 decoder->pkt_len = ret;
1021 decoder->pkt_step = ret;
1022 intel_pt_decoder_log_packet(decoder);
1023 } while (decoder->packet.type == INTEL_PT_PAD);
1028 static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
1030 uint64_t timestamp, masked_timestamp;
1032 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
1033 masked_timestamp = timestamp & decoder->period_mask;
1034 if (decoder->continuous_period) {
1035 if (masked_timestamp > decoder->last_masked_timestamp)
1039 masked_timestamp = timestamp & decoder->period_mask;
1040 if (masked_timestamp > decoder->last_masked_timestamp) {
1041 decoder->last_masked_timestamp = masked_timestamp;
1042 decoder->continuous_period = true;
1046 if (masked_timestamp < decoder->last_masked_timestamp)
1047 return decoder->period_ticks;
1049 return decoder->period_ticks - (timestamp - masked_timestamp);
1052 static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder)
1054 switch (decoder->period_type) {
1055 case INTEL_PT_PERIOD_INSTRUCTIONS:
1056 return decoder->period - decoder->period_insn_cnt;
1057 case INTEL_PT_PERIOD_TICKS:
1058 return intel_pt_next_period(decoder);
1059 case INTEL_PT_PERIOD_NONE:
1060 case INTEL_PT_PERIOD_MTC:
1066 static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
1068 uint64_t timestamp, masked_timestamp;
1070 switch (decoder->period_type) {
1071 case INTEL_PT_PERIOD_INSTRUCTIONS:
1072 decoder->period_insn_cnt = 0;
1074 case INTEL_PT_PERIOD_TICKS:
1075 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
1076 masked_timestamp = timestamp & decoder->period_mask;
1077 if (masked_timestamp > decoder->last_masked_timestamp)
1078 decoder->last_masked_timestamp = masked_timestamp;
1080 decoder->last_masked_timestamp += decoder->period_ticks;
1082 case INTEL_PT_PERIOD_NONE:
1083 case INTEL_PT_PERIOD_MTC:
1088 decoder->state.type |= INTEL_PT_INSTRUCTION;
/*
 * Walk instructions from the current IP until @ip (or until a branch /
 * sample boundary), updating instruction counters and the call/return
 * stack used for RET compression.
 * NOTE(review): this copy has lines elided (leading numbers are original
 * line numbers) — error paths, braces and the out/out_no_progress labels
 * are among the missing lines; compare against the full file before
 * changing the control flow.
 */
1091 static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
1092 struct intel_pt_insn *intel_pt_insn, uint64_t ip)
1094 uint64_t max_insn_cnt, insn_cnt = 0;
1097 if (!decoder->mtc_insn)
1098 decoder->mtc_insn = true;
1100 max_insn_cnt = intel_pt_next_sample(decoder);
1102 err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip,
1103 max_insn_cnt, decoder->data);
/* Account walked instructions in all counters regardless of outcome */
1105 decoder->tot_insn_cnt += insn_cnt;
1106 decoder->timestamp_insn_cnt += insn_cnt;
1107 decoder->sample_insn_cnt += insn_cnt;
1108 decoder->period_insn_cnt += insn_cnt;
/* walk_insn() failed: enter error recovery */
1111 decoder->no_progress = 0;
1112 decoder->pkt_state = INTEL_PT_STATE_ERR2;
1113 intel_pt_log_at("ERROR: Failed to get instruction",
1120 if (ip && decoder->ip == ip) {
1125 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
1126 intel_pt_sample_insn(decoder);
/* Sample fell on a non-branch instruction */
1128 if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) {
1129 decoder->state.type = INTEL_PT_INSTRUCTION;
1130 decoder->state.from_ip = decoder->ip;
1131 decoder->state.to_ip = 0;
1132 decoder->ip += intel_pt_insn->length;
1133 err = INTEL_PT_RETURN;
/* Maintain the RET-compression stack on calls and returns */
1137 if (intel_pt_insn->op == INTEL_PT_OP_CALL) {
1138 /* Zero-length calls are excluded */
1139 if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL ||
1140 intel_pt_insn->rel) {
1141 err = intel_pt_push(&decoder->stack, decoder->ip +
1142 intel_pt_insn->length);
1146 } else if (intel_pt_insn->op == INTEL_PT_OP_RET) {
1147 decoder->ret_addr = intel_pt_pop(&decoder->stack);
1150 if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) {
1151 int cnt = decoder->no_progress++;
1153 decoder->state.from_ip = decoder->ip;
1154 decoder->ip += intel_pt_insn->length +
1156 decoder->state.to_ip = decoder->ip;
1157 err = INTEL_PT_RETURN;
1160 * Check for being stuck in a loop. This can happen if a
1161 * decoder error results in the decoder erroneously setting the
1162 * ip to an address that is itself in an infinite loop that
1163 * consumes no packets. When that happens, there must be an
1164 * unconditional branch.
1168 decoder->stuck_ip = decoder->state.to_ip;
1169 decoder->stuck_ip_prd = 1;
1170 decoder->stuck_ip_cnt = 1;
1171 } else if (cnt > INTEL_PT_MAX_LOOPS ||
1172 decoder->state.to_ip == decoder->stuck_ip) {
1173 intel_pt_log_at("ERROR: Never-ending loop",
1174 decoder->state.to_ip);
1175 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1178 } else if (!--decoder->stuck_ip_cnt) {
/* Widen the stuck-ip sampling period geometrically */
1179 decoder->stuck_ip_prd += 1;
1180 decoder->stuck_ip_cnt = decoder->stuck_ip_prd;
1181 decoder->stuck_ip = decoder->state.to_ip;
1184 goto out_no_progress;
1187 decoder->no_progress = 0;
/* Publish instruction details for the sample */
1189 decoder->state.insn_op = intel_pt_insn->op;
1190 decoder->state.insn_len = intel_pt_insn->length;
1191 memcpy(decoder->state.insn, intel_pt_insn->buf,
1192 INTEL_PT_INSN_BUF_SZ);
1194 if (decoder->tx_flags & INTEL_PT_IN_TX)
1195 decoder->state.flags |= INTEL_PT_IN_TX;
/*
 * Emit any event(s) whose IP was latched by a preceding FUP packet
 * (transaction, PTWRITE, MWAIT, PWRE, EXSTOP, block items). Returns
 * whether an event was produced.
 * NOTE(review): this copy has lines elided (leading numbers are original
 * line numbers) — the return-value plumbing and closing braces are not
 * visible; compare against the full file.
 */
1200 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
/* Transaction start/abort latched from MODE.TSX + FUP */
1204 if (decoder->set_fup_tx_flags) {
1205 decoder->set_fup_tx_flags = false;
1206 decoder->tx_flags = decoder->fup_tx_flags;
1207 decoder->state.type = INTEL_PT_TRANSACTION;
1208 if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
1209 decoder->state.type |= INTEL_PT_BRANCH;
1210 decoder->state.from_ip = decoder->ip;
1211 decoder->state.to_ip = 0;
1212 decoder->state.flags = decoder->fup_tx_flags;
/* PTWRITE payload with FUP-provided IP */
1215 if (decoder->set_fup_ptw) {
1216 decoder->set_fup_ptw = false;
1217 decoder->state.type = INTEL_PT_PTW;
1218 decoder->state.flags |= INTEL_PT_FUP_IP;
1219 decoder->state.from_ip = decoder->ip;
1220 decoder->state.to_ip = 0;
1221 decoder->state.ptw_payload = decoder->fup_ptw_payload;
/* MWAIT hints */
1224 if (decoder->set_fup_mwait) {
1225 decoder->set_fup_mwait = false;
1226 decoder->state.type = INTEL_PT_MWAIT_OP;
1227 decoder->state.from_ip = decoder->ip;
1228 decoder->state.to_ip = 0;
1229 decoder->state.mwait_payload = decoder->fup_mwait_payload;
/* Power entry (PWRE) */
1232 if (decoder->set_fup_pwre) {
1233 decoder->set_fup_pwre = false;
1234 decoder->state.type |= INTEL_PT_PWR_ENTRY;
1235 decoder->state.type &= ~INTEL_PT_BRANCH;
1236 decoder->state.from_ip = decoder->ip;
1237 decoder->state.to_ip = 0;
1238 decoder->state.pwre_payload = decoder->fup_pwre_payload;
/* Execution stopped (EXSTOP) with FUP-provided IP */
1241 if (decoder->set_fup_exstop) {
1242 decoder->set_fup_exstop = false;
1243 decoder->state.type |= INTEL_PT_EX_STOP;
1244 decoder->state.type &= ~INTEL_PT_BRANCH;
1245 decoder->state.flags |= INTEL_PT_FUP_IP;
1246 decoder->state.from_ip = decoder->ip;
1247 decoder->state.to_ip = 0;
/* Block event packet (BEP) items */
1250 if (decoder->set_fup_bep) {
1251 decoder->set_fup_bep = false;
1252 decoder->state.type |= INTEL_PT_BLK_ITEMS;
1253 decoder->state.type &= ~INTEL_PT_BRANCH;
1254 decoder->state.from_ip = decoder->ip;
1255 decoder->state.to_ip = 0;
1261 static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
1262 struct intel_pt_insn *intel_pt_insn,
1263 uint64_t ip, int err)
1265 return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
1266 intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
1267 ip == decoder->ip + intel_pt_insn->length;
/*
 * Walk instructions up to the IP carried by a FUP packet, then emit any
 * latched FUP events. A FUP must not land on a branch, so indirect or
 * conditional branches encountered here are decode errors.
 * NOTE(review): lines are elided in this copy (leading numbers are original
 * line numbers) — the surrounding loop, returns and braces are missing.
 */
1270 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1272 struct intel_pt_insn intel_pt_insn;
1276 ip = decoder->last_ip;
1279 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
1280 if (err == INTEL_PT_RETURN)
/* -EAGAIN: reached the FUP IP; NLIP erratum counts as reaching it too */
1282 if (err == -EAGAIN ||
1283 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
1284 bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
1286 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1287 if (intel_pt_fup_event(decoder) && no_tip)
1291 decoder->set_fup_tx_flags = false;
1295 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1296 intel_pt_log_at("ERROR: Unexpected indirect branch",
1298 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1302 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1303 intel_pt_log_at("ERROR: Unexpected conditional branch",
1305 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1309 intel_pt_bug(decoder);
/*
 * Walk instructions up to the branch bound to a TIP/TIP.PGD packet and
 * emit the corresponding branch sample, handling address-filter (pgd_ip)
 * exits and trace-end (PGD) transitions.
 * NOTE(review): lines are elided in this copy (leading numbers are original
 * line numbers) — error checks, else keywords and braces are missing.
 */
1313 static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
1315 struct intel_pt_insn intel_pt_insn;
1318 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
/* Sampled an unconditional branch that leaves the filter region */
1319 if (err == INTEL_PT_RETURN &&
1321 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1322 (decoder->state.type & INTEL_PT_BRANCH) &&
1323 decoder->pgd_ip(decoder->state.to_ip, decoder->data)) {
1324 /* Unconditional branch leaving filter region */
1325 decoder->no_progress = 0;
1326 decoder->pge = false;
1327 decoder->continuous_period = false;
1328 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1329 decoder->state.type |= INTEL_PT_TRACE_END;
1330 intel_pt_update_nr(decoder);
1333 if (err == INTEL_PT_RETURN)
1338 intel_pt_update_nr(decoder);
/* Indirect branch: target comes from the packet (count==0 => suppressed) */
1340 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1341 if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
1342 decoder->pge = false;
1343 decoder->continuous_period = false;
1344 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1345 decoder->state.from_ip = decoder->ip;
1346 if (decoder->packet.count == 0) {
1347 decoder->state.to_ip = 0;
1349 decoder->state.to_ip = decoder->last_ip;
1350 decoder->ip = decoder->last_ip;
1352 decoder->state.type |= INTEL_PT_TRACE_END;
1354 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1355 decoder->state.from_ip = decoder->ip;
1356 if (decoder->packet.count == 0) {
1357 decoder->state.to_ip = 0;
1359 decoder->state.to_ip = decoder->last_ip;
1360 decoder->ip = decoder->last_ip;
/* Conditional branch here is only legal when it exits the filter region */
1366 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1367 uint64_t to_ip = decoder->ip + intel_pt_insn.length +
1370 if (decoder->pgd_ip &&
1371 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1372 decoder->pgd_ip(to_ip, decoder->data)) {
1373 /* Conditional branch leaving filter region */
1374 decoder->pge = false;
1375 decoder->continuous_period = false;
1376 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1377 decoder->ip = to_ip;
1378 decoder->state.from_ip = decoder->ip;
1379 decoder->state.to_ip = to_ip;
1380 decoder->state.type |= INTEL_PT_TRACE_END;
1383 intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
1385 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1389 return intel_pt_bug(decoder);
/*
 * Walk instructions while consuming a TNT (Taken/Not-Taken) packet.  One
 * payload bit (bit 63 first, shifted left after use) is consumed per
 * conditional branch; compressed RETs are resolved from decoder->ret_addr,
 * and indirect branches take their target from a deferred TIP packet.
 * NOTE(review): some lines (braces/breaks/returns) are elided in this extract.
 */
1392 static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
1394 struct intel_pt_insn intel_pt_insn;
1398 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
1399 if (err == INTEL_PT_RETURN)
/* Compressed RET: a TNT 'taken' bit stands in for the return target */
1404 if (intel_pt_insn.op == INTEL_PT_OP_RET) {
1405 if (!decoder->return_compression) {
1406 intel_pt_log_at("ERROR: RET when expecting conditional branch",
1408 decoder->pkt_state = INTEL_PT_STATE_ERR3;
/* Need a pushed return address to resolve the compressed RET */
1411 if (!decoder->ret_addr) {
1412 intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
1414 decoder->pkt_state = INTEL_PT_STATE_ERR3;
/* A compressed RET must correspond to a 'taken' (1) bit */
1417 if (!(decoder->tnt.payload & BIT63)) {
1418 intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
1420 decoder->pkt_state = INTEL_PT_STATE_ERR3;
/* Consume one TNT bit and branch to the stacked return address */
1423 decoder->tnt.count -= 1;
1424 if (decoder->tnt.count)
1425 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1427 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1428 decoder->tnt.payload <<= 1;
1429 decoder->state.from_ip = decoder->ip;
1430 decoder->ip = decoder->ret_addr;
1431 decoder->state.to_ip = decoder->ip;
/* Indirect branch within a TNT run: target comes from a deferred TIP */
1435 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1436 /* Handle deferred TIPs */
1437 err = intel_pt_get_next_packet(decoder);
1440 if (decoder->packet.type != INTEL_PT_TIP ||
1441 decoder->packet.count == 0) {
1442 intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
1444 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1445 decoder->pkt_step = 0;
1448 intel_pt_set_last_ip(decoder);
1449 decoder->state.from_ip = decoder->ip;
1450 decoder->state.to_ip = decoder->last_ip;
1451 decoder->ip = decoder->last_ip;
1452 intel_pt_update_nr(decoder);
/* Conditional branch: bit 63 of the payload selects taken / not-taken */
1456 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1457 decoder->tnt.count -= 1;
1458 if (decoder->tnt.count)
1459 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1461 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
/* Taken: follow the branch to ip + length + displacement */
1462 if (decoder->tnt.payload & BIT63) {
1463 decoder->tnt.payload <<= 1;
1464 decoder->state.from_ip = decoder->ip;
1465 decoder->ip += intel_pt_insn.length +
1467 decoder->state.to_ip = decoder->ip;
1470 /* Instruction sample for a non-taken branch */
1471 if (decoder->state.type & INTEL_PT_INSTRUCTION) {
1472 decoder->tnt.payload <<= 1;
1473 decoder->state.type = INTEL_PT_INSTRUCTION;
1474 decoder->state.from_ip = decoder->ip;
1475 decoder->state.to_ip = 0;
1476 decoder->ip += intel_pt_insn.length;
/* Not-taken, no sample: fall through to the next sequential instruction */
1479 decoder->sample_cyc = false;
1480 decoder->ip += intel_pt_insn.length;
1481 if (!decoder->tnt.count) {
1482 intel_pt_update_sample_time(decoder);
1485 decoder->tnt.payload <<= 1;
/* Should be unreachable */
1489 return intel_pt_bug(decoder);
/*
 * Handle a MODE.TSX packet: latch the transaction (TSX) flags from the
 * payload and expect an immediately following FUP to apply them at; a
 * missing FUP is logged as an error and the flags are applied directly.
 */
1493 static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
1495 unsigned int fup_tx_flags;
1498 fup_tx_flags = decoder->packet.payload &
1499 (INTEL_PT_IN_TX | INTEL_PT_ABORT_TX);
/* Peek at the next packet, which should be the FUP bound to MODE.TSX */
1500 err = intel_pt_get_next_packet(decoder);
1503 if (decoder->packet.type == INTEL_PT_FUP) {
/* Defer the flags until the FUP is processed */
1504 decoder->fup_tx_flags = fup_tx_flags;
1505 decoder->set_fup_tx_flags = true;
1506 if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX))
1509 intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
1511 intel_pt_update_in_tx(decoder);
/*
 * Reconstruct a full 8-byte TSC from the 7 bytes carried by a TSC packet.
 * The top byte is taken from @ref_timestamp, then, if the result lies more
 * than half the 7-byte range (2^55) away from the reference, it is moved
 * one 2^56 step towards it to correct for 7-byte wraparound.
 */
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
	uint64_t top_byte = ref_timestamp & (0xffULL << 56);
	uint64_t result = timestamp | top_byte;

	/* Correct for wraparound relative to the reference timestamp */
	if (result < ref_timestamp && ref_timestamp - result > (1ULL << 55))
		result += 1ULL << 56;
	else if (result >= ref_timestamp && result - ref_timestamp > (1ULL << 55))
		result -= 1ULL << 56;

	return result;
}
1531 /* For use only when decoder->vm_time_correlation is true */
1532 static bool intel_pt_time_in_range(struct intel_pt_decoder *decoder,
1535 uint64_t max_timestamp = decoder->buf_timestamp;
1537 if (!max_timestamp) {
1538 max_timestamp = decoder->last_reliable_timestamp +
1541 return timestamp >= decoder->last_reliable_timestamp &&
1542 timestamp < decoder->buf_timestamp;
/*
 * Process a TSC packet and update decoder->timestamp.  The first TSC after
 * synchronization is anchored to ref_timestamp via intel_pt_8b_tsc();
 * subsequent TSCs inherit the current top byte, with small backward steps
 * (within tsc_slip) suppressed and larger ones treated as wraparound or
 * rejected as bad.
 */
1545 static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
/* TSC invalidates any previous TMA-derived state */
1550 decoder->have_tma = false;
/* First TSC after sync: reconstruct the full 8 bytes from ref_timestamp */
1552 if (decoder->ref_timestamp) {
1553 timestamp = intel_pt_8b_tsc(decoder->packet.payload,
1554 decoder->ref_timestamp);
1555 decoder->tsc_timestamp = timestamp;
1556 decoder->timestamp = timestamp;
1557 decoder->ref_timestamp = 0;
1558 decoder->timestamp_insn_cnt = 0;
1559 } else if (decoder->timestamp) {
/* Subsequent TSC: keep the current top byte */
1560 timestamp = decoder->packet.payload |
1561 (decoder->timestamp & (0xffULL << 56));
1562 decoder->tsc_timestamp = timestamp;
/* Small backwards step (< tsc_slip): keep the current timestamp */
1563 if (timestamp < decoder->timestamp &&
1564 decoder->timestamp - timestamp < decoder->tsc_slip) {
1565 intel_pt_log_to("Suppressing backwards timestamp",
1567 timestamp = decoder->timestamp;
/* Still backwards: either 7-byte wraparound or a bad timestamp */
1569 if (timestamp < decoder->timestamp) {
1570 if (!decoder->buf_timestamp ||
1571 (timestamp + (1ULL << 56) < decoder->buf_timestamp)) {
1572 intel_pt_log_to("Wraparound timestamp", timestamp);
1573 timestamp += (1ULL << 56);
1574 decoder->tsc_timestamp = timestamp;
1576 intel_pt_log_to("Suppressing bad timestamp", timestamp);
1577 timestamp = decoder->timestamp;
/* In VM time-correlation mode, warn once about out-of-range timestamps */
1581 if (decoder->vm_time_correlation &&
1582 (bad || !intel_pt_time_in_range(decoder, timestamp)) &&
1583 intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
1584 p_log("Timestamp out of range");
1585 decoder->timestamp = timestamp;
1586 decoder->timestamp_insn_cnt = 0;
/* A preceding CYC packet re-anchors the CYC-to-TSC estimation */
1589 if (decoder->last_packet_type == INTEL_PT_CYC) {
1590 decoder->cyc_ref_timestamp = decoder->timestamp;
1591 decoder->cycle_cnt = 0;
1592 decoder->have_calc_cyc_to_tsc = false;
1593 intel_pt_calc_cyc_to_tsc(decoder, false);
1596 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Handle an OVF (trace buffer overflow) packet: clear transaction flags,
 * reset insn counting and move the decoder into the error-resync state.
 */
1599 static int intel_pt_overflow(struct intel_pt_decoder *decoder)
1601 intel_pt_log("ERROR: Buffer overflow\n");
1602 intel_pt_clear_tx_flags(decoder);
1603 intel_pt_set_nr(decoder);
1604 decoder->timestamp_insn_cnt = 0;
1605 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1606 decoder->overflow = true;
/*
 * Record the baseline (timestamp, total cycle count) at a trace-enable
 * point, used to extrapolate cycle counts when no CYC packets are present.
 */
1610 static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder *decoder)
/* Real CYC packets make the estimation unnecessary */
1612 if (decoder->have_cyc)
1615 decoder->cyc_cnt_timestamp = decoder->timestamp;
1616 decoder->base_cyc_cnt = decoder->tot_cyc_cnt;
/*
 * A CBR change alters the TSC-to-cycles ratio, so recompute it and
 * re-anchor the cycle-count baseline.
 */
1619 static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder *decoder)
1621 decoder->tsc_to_cyc = decoder->cbr / decoder->max_non_turbo_ratio_fp;
1624 intel_pt_mtc_cyc_cnt_pge(decoder);
/*
 * Estimate the running total cycle count from the TSC delta since the last
 * baseline, when there are no CYC packets.  Only ever moves tot_cyc_cnt
 * forwards.
 */
1627 static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder *decoder)
1629 uint64_t tot_cyc_cnt, tsc_delta;
/* Real CYC packets supersede the estimation */
1631 if (decoder->have_cyc)
1634 decoder->sample_cyc = true;
1636 if (!decoder->pge || decoder->timestamp <= decoder->cyc_cnt_timestamp)
1639 tsc_delta = decoder->timestamp - decoder->cyc_cnt_timestamp;
1640 tot_cyc_cnt = tsc_delta * decoder->tsc_to_cyc + decoder->base_cyc_cnt;
/* Never let the estimate run the total backwards */
1642 if (tot_cyc_cnt > decoder->tot_cyc_cnt)
1643 decoder->tot_cyc_cnt = tot_cyc_cnt;
/*
 * Process a TMA packet, which ties the CTC (crystal clock) value and fast
 * counter to the preceding TSC, establishing ctc_timestamp so that later
 * MTC packets can be converted to TSC time.
 */
1646 static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
1648 uint32_t ctc = decoder->packet.payload;
1649 uint32_t fc = decoder->packet.count;
1650 uint32_t ctc_rem = ctc & decoder->ctc_rem_mask;
/* Without a TSC:CTC ratio the TMA cannot be used */
1652 if (!decoder->tsc_ctc_ratio_d)
1655 if (decoder->pge && !decoder->in_psb)
1656 intel_pt_mtc_cyc_cnt_pge(decoder);
1658 intel_pt_mtc_cyc_cnt_upd(decoder);
1660 decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
1661 decoder->last_ctc = ctc - ctc_rem;
/* Wind the TSC back by the fast counter and the CTC remainder */
1662 decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
1663 if (decoder->tsc_ctc_mult) {
1664 decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
1666 decoder->ctc_timestamp -= multdiv(ctc_rem,
1667 decoder->tsc_ctc_ratio_n,
1668 decoder->tsc_ctc_ratio_d);
1670 decoder->ctc_delta = 0;
1671 decoder->have_tma = true;
1672 decoder->fixup_last_mtc = true;
1673 intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
1674 decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
/*
 * Process an MTC packet: accumulate the 8-bit MTC delta (with wraparound)
 * into ctc_delta and convert to a TSC-based timestamp using the ratio
 * established by the last TMA packet.  Requires have_tma.
 */
1677 static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
1680 uint32_t mtc, mtc_delta;
1682 if (!decoder->have_tma)
1685 mtc = decoder->packet.payload;
/* With mtc_shift > 8 the last MTC value may need one-time correction */
1687 if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
1688 decoder->fixup_last_mtc = false;
1689 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
1690 &decoder->last_mtc);
/* MTC is an 8-bit counter, so handle wraparound via +256 */
1693 if (mtc > decoder->last_mtc)
1694 mtc_delta = mtc - decoder->last_mtc;
1696 mtc_delta = mtc + 256 - decoder->last_mtc;
1698 decoder->ctc_delta += mtc_delta << decoder->mtc_shift;
/* Convert accumulated CTC ticks to a TSC-based timestamp */
1700 if (decoder->tsc_ctc_mult) {
1701 timestamp = decoder->ctc_timestamp +
1702 decoder->ctc_delta * decoder->tsc_ctc_mult;
1704 timestamp = decoder->ctc_timestamp +
1705 multdiv(decoder->ctc_delta,
1706 decoder->tsc_ctc_ratio_n,
1707 decoder->tsc_ctc_ratio_d);
/* Never move the timestamp backwards */
1710 if (timestamp < decoder->timestamp)
1711 intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
1712 timestamp, decoder->timestamp);
1714 decoder->timestamp = timestamp;
1716 intel_pt_mtc_cyc_cnt_upd(decoder);
1718 decoder->timestamp_insn_cnt = 0;
1719 decoder->last_mtc = mtc;
/* A preceding CYC packet re-anchors the CYC-to-TSC estimation */
1721 if (decoder->last_packet_type == INTEL_PT_CYC) {
1722 decoder->cyc_ref_timestamp = decoder->timestamp;
1723 decoder->cycle_cnt = 0;
1724 decoder->have_calc_cyc_to_tsc = false;
1725 intel_pt_calc_cyc_to_tsc(decoder, true);
1728 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Process a CBR (core-to-bus ratio) packet: record the payload and, on a
 * ratio change, recompute the CBR-based cycles-to-TSC factor.
 */
1731 static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
1733 unsigned int cbr = decoder->packet.payload & 0xff;
1735 decoder->cbr_payload = decoder->packet.payload;
/* No change in ratio - nothing further to do */
1737 if (decoder->cbr == cbr)
1741 decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
1743 intel_pt_mtc_cyc_cnt_cbr(decoder);
/*
 * Process a CYC packet: accumulate cycle counts and, when a reference
 * timestamp exists, derive a timestamp from cycles using either the
 * calculated or the CBR-based cycles-to-TSC factor.
 */
1746 static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
1748 uint64_t timestamp = decoder->cyc_ref_timestamp;
1750 decoder->have_cyc = true;
1752 decoder->cycle_cnt += decoder->packet.payload;
1754 decoder->tot_cyc_cnt += decoder->packet.payload;
1755 decoder->sample_cyc = true;
/* No reference point yet - cannot derive a timestamp */
1757 if (!decoder->cyc_ref_timestamp)
/* Prefer the measured CYC-to-TSC factor, fall back to the CBR-based one */
1760 if (decoder->have_calc_cyc_to_tsc)
1761 timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc;
1762 else if (decoder->cbr)
1763 timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc;
/* Never move the timestamp backwards */
1767 if (timestamp < decoder->timestamp)
1768 intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
1769 timestamp, decoder->timestamp);
1771 decoder->timestamp = timestamp;
1773 decoder->timestamp_insn_cnt = 0;
1775 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Process a BBP (Block Begin Packet): start a new block of items, clearing
 * the item mask when this is the first block in the context, and record the
 * block type and its position for subsequent BIP packets.
 */
1778 static void intel_pt_bbp(struct intel_pt_decoder *decoder)
1780 if (decoder->prev_pkt_ctx == INTEL_PT_NO_CTX) {
1781 memset(decoder->state.items.mask, 0, sizeof(decoder->state.items.mask));
1782 decoder->state.items.is_32_bit = false;
1784 decoder->blk_type = decoder->packet.payload;
1785 decoder->blk_type_pos = intel_pt_blk_type_pos(decoder->blk_type);
/* For GP registers, the packet count flags 32-bit register values */
1786 if (decoder->blk_type == INTEL_PT_GP_REGS)
1787 decoder->state.items.is_32_bit = decoder->packet.count;
1788 if (decoder->blk_type_pos < 0) {
1789 intel_pt_log("WARNING: Unknown block type %u\n",
1791 } else if (decoder->state.items.mask[decoder->blk_type_pos]) {
1792 intel_pt_log("WARNING: Duplicate block type %u\n",
/*
 * Process a BIP (Block Item Packet): store the payload into the item slot
 * identified by the packet count within the current block type, warning on
 * unknown or duplicate items.
 */
1797 static void intel_pt_bip(struct intel_pt_decoder *decoder)
1799 uint32_t id = decoder->packet.count;
1800 uint32_t bit = 1 << id;
1801 int pos = decoder->blk_type_pos;
/* Reject items outside a known block or beyond the item table */
1803 if (pos < 0 || id >= INTEL_PT_BLK_ITEM_ID_CNT) {
1804 intel_pt_log("WARNING: Unknown block item %u type %d\n",
1805 id, decoder->blk_type);
1809 if (decoder->state.items.mask[pos] & bit) {
1810 intel_pt_log("WARNING: Duplicate block item %u type %d\n",
1811 id, decoder->blk_type);
1814 decoder->state.items.mask[pos] |= bit;
1815 decoder->state.items.val[pos][id] = decoder->packet.payload;
1818 /* Walk PSB+ packets when already in sync. */
/*
 * Consume the status packets between PSB and PSBEND, updating timestamps,
 * CBR, exec mode, PIP and the PSB FUP ip (psb_ip) along the way.  Packets
 * that are not legal inside PSB+ are treated as errors.
 * NOTE(review): some case labels and break/return lines are elided here.
 */
1819 static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
1823 decoder->in_psb = true;
1826 err = intel_pt_get_next_packet(decoder);
1830 switch (decoder->packet.type) {
1831 case INTEL_PT_PSBEND:
/* Packets not permitted inside PSB+ */
1835 case INTEL_PT_TIP_PGD:
1836 case INTEL_PT_TIP_PGE:
1839 case INTEL_PT_TRACESTOP:
1842 case INTEL_PT_PTWRITE:
1843 case INTEL_PT_PTWRITE_IP:
1844 case INTEL_PT_EXSTOP:
1845 case INTEL_PT_EXSTOP_IP:
1846 case INTEL_PT_MWAIT:
1852 case INTEL_PT_BEP_IP:
1853 decoder->have_tma = false;
1854 intel_pt_log("ERROR: Unexpected packet\n");
1859 err = intel_pt_overflow(decoder);
1863 intel_pt_calc_tsc_timestamp(decoder);
1867 intel_pt_calc_tma(decoder);
1871 intel_pt_calc_cbr(decoder);
1874 case INTEL_PT_MODE_EXEC:
1875 decoder->exec_mode = decoder->packet.payload;
1879 intel_pt_set_pip(decoder);
/* The FUP inside PSB+ supplies the current ip */
1883 decoder->pge = true;
1884 if (decoder->packet.count) {
1885 intel_pt_set_last_ip(decoder);
1886 decoder->psb_ip = decoder->last_ip;
1890 case INTEL_PT_MODE_TSX:
1891 intel_pt_update_in_tx(decoder);
1895 intel_pt_calc_mtc_timestamp(decoder);
1896 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
1897 decoder->state.type |= INTEL_PT_INSTRUCTION;
1901 intel_pt_calc_cyc_timestamp(decoder);
1912 decoder->in_psb = false;
/*
 * After a FUP has been processed, walk packets until the TIP / TIP.PGD /
 * TIP.PGE that completes the (asynchronous) branch, updating the state's
 * from_ip/to_ip.  Transaction-abort flags are folded into the state flags
 * first.
 * NOTE(review): some case labels and break/return lines are elided here.
 */
1917 static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
/* An aborted transaction ends here: clear IN_TX, report ABORT_TX */
1921 if (decoder->tx_flags & INTEL_PT_ABORT_TX) {
1922 decoder->tx_flags = 0;
1923 decoder->state.flags &= ~INTEL_PT_IN_TX;
1924 decoder->state.flags |= INTEL_PT_ABORT_TX;
1926 decoder->state.flags |= INTEL_PT_ASYNC;
1930 err = intel_pt_get_next_packet(decoder);
1934 switch (decoder->packet.type) {
/* Packets that must not appear before the expected TIP */
1937 case INTEL_PT_TRACESTOP:
1941 case INTEL_PT_MODE_TSX:
1943 case INTEL_PT_PSBEND:
1944 case INTEL_PT_PTWRITE:
1945 case INTEL_PT_PTWRITE_IP:
1946 case INTEL_PT_EXSTOP:
1947 case INTEL_PT_EXSTOP_IP:
1948 case INTEL_PT_MWAIT:
1954 case INTEL_PT_BEP_IP:
1955 intel_pt_log("ERROR: Missing TIP after FUP\n");
1956 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1957 decoder->pkt_step = 0;
1961 intel_pt_calc_cbr(decoder);
1965 return intel_pt_overflow(decoder);
/* TIP.PGD: trace disabled - branch target may be suppressed (count == 0) */
1967 case INTEL_PT_TIP_PGD:
1968 decoder->state.from_ip = decoder->ip;
1969 if (decoder->packet.count == 0) {
1970 decoder->state.to_ip = 0;
1972 intel_pt_set_ip(decoder);
1973 decoder->state.to_ip = decoder->ip;
1975 decoder->pge = false;
1976 decoder->continuous_period = false;
1977 decoder->state.type |= INTEL_PT_TRACE_END;
1978 intel_pt_update_nr(decoder);
/* TIP.PGE: trace (re)enabled at the packet ip */
1981 case INTEL_PT_TIP_PGE:
1982 decoder->pge = true;
1983 intel_pt_log("Omitting PGE ip " x64_fmt "\n",
1985 decoder->state.from_ip = 0;
1986 if (decoder->packet.count == 0) {
1987 decoder->state.to_ip = 0;
1989 intel_pt_set_ip(decoder);
1990 decoder->state.to_ip = decoder->ip;
1992 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
1993 intel_pt_mtc_cyc_cnt_pge(decoder);
1994 intel_pt_set_nr(decoder);
/* Plain TIP: complete the branch to the packet ip */
1998 decoder->state.from_ip = decoder->ip;
1999 if (decoder->packet.count == 0) {
2000 decoder->state.to_ip = 0;
2002 intel_pt_set_ip(decoder);
2003 decoder->state.to_ip = decoder->ip;
2005 intel_pt_update_nr(decoder);
2009 intel_pt_update_pip(decoder);
2013 intel_pt_calc_mtc_timestamp(decoder);
2014 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
2015 decoder->state.type |= INTEL_PT_INSTRUCTION;
2019 intel_pt_calc_cyc_timestamp(decoder);
2022 case INTEL_PT_MODE_EXEC:
2023 decoder->exec_mode = decoder->packet.payload;
/* Should be unreachable */
2032 return intel_pt_bug(decoder);
/*
 * Emit an instruction sample at the current ip and return the decoder to
 * the in-sync state (used after resynchronization in sampling modes).
 */
2037 static int intel_pt_resample(struct intel_pt_decoder *decoder)
2039 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2040 decoder->state.type = INTEL_PT_INSTRUCTION;
2041 decoder->state.from_ip = decoder->ip;
2042 decoder->state.to_ip = 0;
/*
 * Scratch state for VM time correlation: the PIP / VMCS / TMA packets found
 * by lookahead around a TSC, plus flags recording which were seen.
 * NOTE(review): further fields are elided in this extract.
 */
2046 struct intel_pt_vm_tsc_info {
2047 struct intel_pt_pkt pip_packet;
2048 struct intel_pt_pkt vmcs_packet;
2049 struct intel_pt_pkt tma_packet;
2050 bool tsc, pip, vmcs, tma, psbend;
2056 /* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
/*
 * Packet-lookahead callback: captures the PIP, VMCS and TMA packets seen
 * inside a PSB+ into struct intel_pt_vm_tsc_info and notes PSBEND; packets
 * that cannot occur inside PSB+ terminate the lookahead.
 * NOTE(review): some case labels and return lines are elided here.
 */
2057 static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2059 struct intel_pt_vm_tsc_info *data = pkt_info->data;
2061 switch (pkt_info->packet.type) {
2064 case INTEL_PT_MODE_EXEC:
2065 case INTEL_PT_MODE_TSX:
2077 data->tma_packet = pkt_info->packet;
2082 data->pip_packet = pkt_info->packet;
2087 data->vmcs_packet = pkt_info->packet;
2091 case INTEL_PT_PSBEND:
2092 data->psbend = true;
/* Packets that end the PSB+ region */
2095 case INTEL_PT_TIP_PGE:
2096 case INTEL_PT_PTWRITE:
2097 case INTEL_PT_PTWRITE_IP:
2098 case INTEL_PT_EXSTOP:
2099 case INTEL_PT_EXSTOP_IP:
2100 case INTEL_PT_MWAIT:
2106 case INTEL_PT_BEP_IP:
2110 case INTEL_PT_TIP_PGD:
2113 case INTEL_PT_TRACESTOP:
/* Lookahead state for finding a FUP after an OVF (fields elided here) */
2121 struct intel_pt_ovf_fup_info {
2126 /* Lookahead to detect a FUP packet after OVF */
/*
 * Timing packets (CYC/MTC/TSC) are skipped up to max_lookahead packets;
 * any other packet decides the outcome: found is true only for a FUP.
 */
2127 static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2129 struct intel_pt_ovf_fup_info *data = pkt_info->data;
2131 if (pkt_info->packet.type == INTEL_PT_CYC ||
2132 pkt_info->packet.type == INTEL_PT_MTC ||
2133 pkt_info->packet.type == INTEL_PT_TSC)
/* Non-zero return stops the lookahead when the budget is exhausted */
2134 return !--(data->max_lookahead);
2135 data->found = pkt_info->packet.type == INTEL_PT_FUP;
/*
 * Report whether a FUP follows the current OVF packet, looking past up to
 * 16 timing packets.
 */
2139 static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder *decoder)
2141 struct intel_pt_ovf_fup_info data = {
2142 .max_lookahead = 16,
2146 intel_pt_pkt_lookahead(decoder, intel_pt_ovf_fup_lookahead_cb, &data);
2150 /* Lookahead and get the TMA packet after TSC */
/*
 * Skips CYC/MTC packets up to max_lookahead and captures the first TMA
 * packet found into struct intel_pt_vm_tsc_info.
 */
2151 static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2153 struct intel_pt_vm_tsc_info *data = pkt_info->data;
2155 if (pkt_info->packet.type == INTEL_PT_CYC ||
2156 pkt_info->packet.type == INTEL_PT_MTC)
/* Non-zero return stops the lookahead when the budget is exhausted */
2157 return !--(data->max_lookahead);
2159 if (pkt_info->packet.type == INTEL_PT_TMA) {
2160 data->tma_packet = pkt_info->packet;
2166 static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
2168 if (decoder->tsc_ctc_mult)
2169 return ctc * decoder->tsc_ctc_mult;
2171 return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
/*
 * Compute the host TSC value expected at the current TMA packet, from the
 * CTC state at the last established CTC timestamp.  Used by VM time
 * correlation to derive the guest's TSC Offset.
 * NOTE(review): part of the parameter list is elided in this extract.
 */
2174 static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
2177 uint64_t last_ctc_timestamp,
2181 /* Number of CTC ticks from last_ctc_timestamp to last_mtc */
2182 uint64_t last_mtc_ctc = last_ctc + ctc_delta;
2184 * Number of CTC ticks from there until current TMA packet. We would
2185 * expect last_mtc_ctc to be before ctc, but the TSC packet can slip
2186 * past an MTC, so a sign-extended value is used.
2188 uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
2189 /* Total CTC ticks from last_ctc_timestamp to current TMA packet */
2190 uint64_t new_ctc_delta = ctc_delta + delta;
2191 uint64_t expected_tsc;
2194 * Convert CTC ticks to TSC ticks, add the starting point
2195 * (last_ctc_timestamp) and the fast counter from the TMA packet.
2197 expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;
/* Dump all intermediate values when logging is enabled */
2199 if (intel_pt_enable_logging) {
2200 intel_pt_log_x64(last_mtc_ctc);
2201 intel_pt_log_x32(last_ctc);
2202 intel_pt_log_x64(ctc_delta);
2203 intel_pt_log_x64(delta);
2204 intel_pt_log_x32(ctc);
2205 intel_pt_log_x64(new_ctc_delta);
2206 intel_pt_log_x64(last_ctc_timestamp);
2207 intel_pt_log_x32(fc);
2208 intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
2209 intel_pt_log_x64(expected_tsc);
2212 return expected_tsc;
/*
 * Convenience wrapper: compute the expected host TSC at the TMA packet
 * captured in @data, using the decoder's current CTC timestamp.
 */
2215 static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
2216 struct intel_pt_vm_tsc_info *data)
2218 uint32_t ctc = data->tma_packet.payload;
2219 uint32_t fc = data->tma_packet.count;
2221 return intel_pt_calc_expected_tsc(decoder, ctc, fc,
2222 decoder->ctc_timestamp,
2223 data->ctc_delta, data->last_ctc);
/*
 * Rewrite the current TSC packet's payload from guest to host time by
 * subtracting the VMCS TSC Offset, and (unless a dry run) patch the 7
 * payload bytes back into the mmapped trace buffer.
 */
2226 static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
2227 struct intel_pt_vmcs_info *vmcs_info)
2229 uint64_t payload = decoder->packet.payload;
2231 /* VMX adds the TSC Offset, so subtract to get host TSC */
2232 decoder->packet.payload -= vmcs_info->tsc_offset;
2233 /* TSC packet has only 7 bytes */
2234 decoder->packet.payload &= SEVEN_BYTES;
2237 * The buffer is mmapped from the data file, so this also updates the
/* Skip the 1-byte packet header, then overwrite the 7 payload bytes */
2240 if (!decoder->vm_tm_corr_dry_run)
2241 memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);
2243 intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
2244 " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
2245 payload, decoder->packet.payload, vmcs_info->vmcs,
2246 vmcs_info->tsc_offset);
/*
 * Translate the current TSC packet using an ad hoc TSC Offset (no recorded
 * VMCS info), by building a temporary intel_pt_vmcs_info on the stack.
 */
2249 static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
2250 uint64_t tsc_offset)
2252 struct intel_pt_vmcs_info vmcs_info = {
2254 .tsc_offset = tsc_offset
2257 intel_pt_translate_vm_tsc(decoder, &vmcs_info);
/* Bit 0 of a PIP payload (NR) is set while executing in a VM guest */
static inline bool in_vm(uint64_t pip_payload)
{
	return (pip_payload & 1) != 0;
}
2265 static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
2267 return pip_packet->payload & 1;
/* Log a VMCS address and its recorded TSC Offset */
2270 static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
2272 p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
2273 vmcs_info->vmcs, vmcs_info->tsc_offset);
/*
 * On seeing a PSB during VM time correlation, look ahead through the PSB+
 * region to gather its PIP/VMCS/TMA packets into @data.  A TSC without a
 * matching PSBEND is reported as an error.
 */
2276 static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
2277 struct intel_pt_vm_tsc_info *data)
2279 memset(data, 0, sizeof(*data));
/* Seed with the current CTC state so expected-TSC math can use it */
2280 data->ctc_delta = decoder->ctc_delta;
2281 data->last_ctc = decoder->last_ctc;
2282 intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
2283 if (data->tsc && !data->psbend)
2284 p_log("ERROR: PSB without PSBEND");
2285 decoder->in_psb = data->psbend;
/*
 * Handle the very first TSC seen during VM time correlation.  With a PIP in
 * the PSB+ the Host/Guest decision is definite; without one it falls back
 * to checking whether the host TSC is in the expected time range.
 * NOTE(review): part of the parameter list and some lines are elided here.
 */
2288 static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
2289 struct intel_pt_vm_tsc_info *data,
2290 struct intel_pt_vmcs_info *vmcs_info,
/* The first TSC is expected inside a PSB+ */
2293 if (!decoder->in_psb) {
2295 p_log("ERROR: First TSC is not in PSB+");
2299 if (pip_in_vm(&data->pip_packet)) { /* Guest */
2300 if (vmcs_info && vmcs_info->tsc_offset) {
2301 intel_pt_translate_vm_tsc(decoder, vmcs_info);
2302 decoder->vm_tm_corr_reliable = true;
2304 p_log("ERROR: First TSC, unknown TSC Offset");
2307 decoder->vm_tm_corr_reliable = true;
2309 } else { /* Host or Guest */
/* Without a PIP the Host/Guest decision is only a guess */
2310 decoder->vm_tm_corr_reliable = false;
2311 if (intel_pt_time_in_range(decoder, host_tsc)) {
2315 if (vmcs_info && vmcs_info->tsc_offset)
2316 intel_pt_translate_vm_tsc(decoder, vmcs_info);
2318 p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
/*
 * Core of VM time correlation for a TSC packet: decide whether the TSC is
 * Host or Guest, derive the guest TSC Offset from the difference between
 * the packet value and the expected host TSC, optionally record that
 * offset against the VMCS, and translate guest TSCs to host time.
 * NOTE(review): some lines (braces, gotos, breaks) are elided here.
 */
2323 static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
2324 struct intel_pt_vm_tsc_info *data)
2326 struct intel_pt_vmcs_info *vmcs_info;
2327 uint64_t tsc_offset = 0;
2329 bool reliable = true;
2330 uint64_t expected_tsc;
2332 uint64_t ref_timestamp;
2334 bool assign = false;
2335 bool assign_reliable = false;
2337 /* Already have 'data' for the in_psb case */
2338 if (!decoder->in_psb) {
2339 memset(data, 0, sizeof(*data));
2340 data->ctc_delta = decoder->ctc_delta;
2341 data->last_ctc = decoder->last_ctc;
2342 data->max_lookahead = 16;
/* Outside PSB+, find the TMA that follows this TSC */
2343 intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
2346 data->pip_packet.payload = decoder->pip_payload;
2350 /* Calculations depend on having TMA packets */
2352 p_log("ERROR: TSC without TMA");
2356 vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
2357 if (vmcs == NO_VMCS)
2360 vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
2362 ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
2363 host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);
/* No CTC reference yet - this is the first TSC */
2365 if (!decoder->ctc_timestamp) {
2366 intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
2370 expected_tsc = intel_pt_expected_tsc(decoder, data);
/* If the packet is a guest TSC, this difference is the TSC Offset */
2372 tsc_offset = host_tsc - expected_tsc;
2374 /* Determine if TSC is from Host or Guest */
2376 if (pip_in_vm(&data->pip_packet)) { /* Guest */
2378 /* PIP NR=1 without VMCS cannot happen */
2379 p_log("ERROR: Missing VMCS");
2380 intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
2381 decoder->vm_tm_corr_reliable = false;
2385 decoder->last_reliable_timestamp = host_tsc;
2386 decoder->vm_tm_corr_reliable = true;
2389 } else { /* Host or Guest */
2390 reliable = false; /* Host/Guest is a guess, so not reliable */
2391 if (decoder->in_psb) {
2393 return; /* Zero TSC Offset, assume Host */
2395 * TSC packet has only 7 bytes of TSC. We have no
2396 * information about the Guest's 8th byte, but it
2397 * doesn't matter because we only need 7 bytes.
2398 * Here, since the 8th byte is unreliable and
2399 * irrelevant, compare only 7 byes.
2402 (tsc_offset & SEVEN_BYTES) ==
2403 (vmcs_info->tsc_offset & SEVEN_BYTES)) {
2404 /* Same TSC Offset as last VMCS, assume Guest */
2409 * Check if the host_tsc is within the expected range.
2410 * Note, we could narrow the range more by looking ahead for
2411 * the next host TSC in the same buffer, but we don't bother to
2412 * do that because this is probably good enough.
2414 if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
2415 /* Within expected range for Host TSC, assume Host */
2416 decoder->vm_tm_corr_reliable = false;
2421 guest: /* Assuming Guest */
2423 /* Determine whether to assign TSC Offset */
2424 if (vmcs_info && vmcs_info->vmcs) {
2425 if (vmcs_info->tsc_offset && vmcs_info->reliable) {
2427 } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
2428 decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
2429 /* Continuous tracing, TSC in a PSB is not a time loss */
2431 assign_reliable = true;
2432 } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
2434 * Unlikely to be a time loss TSC in a PSB which is not
2435 * at the start of a buffer.
2438 assign_reliable = false;
2442 /* Record VMCS TSC Offset */
2443 if (assign && (vmcs_info->tsc_offset != tsc_offset ||
2444 vmcs_info->reliable != assign_reliable)) {
2445 bool print = vmcs_info->tsc_offset != tsc_offset;
2447 vmcs_info->tsc_offset = tsc_offset;
2448 vmcs_info->reliable = assign_reliable;
2450 intel_pt_print_vmcs_info(vmcs_info);
2453 /* Determine what TSC Offset to use */
2454 if (vmcs_info && vmcs_info->tsc_offset) {
2455 if (!vmcs_info->reliable)
2457 intel_pt_translate_vm_tsc(decoder, vmcs_info);
/* Known VMCS but no offset recorded: report once per VMCS */
2461 if (!vmcs_info->error_printed) {
2462 p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
2464 vmcs_info->error_printed = true;
2467 if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
2468 p_log("ERROR: Unknown VMCS");
2470 intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
2473 decoder->vm_tm_corr_reliable = reliable;
/*
 * Correlate a PEBS (Block Item) TSC value during VM time correlation:
 * decide Host/Guest, translate guest values using the recorded VMCS TSC
 * Offset, and patch the full 8 bytes back into the trace buffer unless
 * this is a dry run.
 * NOTE(review): some lines (braces, gotos, labels) are elided here.
 */
2476 static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
2478 uint64_t host_tsc = decoder->packet.payload;
2479 uint64_t guest_tsc = decoder->packet.payload;
2480 struct intel_pt_vmcs_info *vmcs_info;
2483 vmcs = decoder->vmcs;
2484 if (vmcs == NO_VMCS)
2487 vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
/* NR bit of the last PIP decides Guest vs Host/unknown */
2490 if (in_vm(decoder->pip_payload)) { /* Guest */
2492 /* PIP NR=1 without VMCS cannot happen */
2493 p_log("ERROR: Missing VMCS");
2498 } else { /* Host or Guest */
2499 if (intel_pt_time_in_range(decoder, host_tsc)) {
2500 /* Within expected range for Host TSC, assume Host */
2506 /* Translate Guest TSC to Host TSC */
2507 host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
2508 host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
2509 intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
2510 " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
2511 guest_tsc, host_tsc, vmcs_info->vmcs,
2512 vmcs_info->tsc_offset);
2513 if (!intel_pt_time_in_range(decoder, host_tsc) &&
2514 intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
2515 p_log("Timestamp out of range");
2517 if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
2518 p_log("ERROR: Unknown VMCS");
2519 host_tsc = decoder->timestamp;
2522 decoder->packet.payload = host_tsc;
/* Skip the 1-byte header, then overwrite the 8-byte item value in place */
2524 if (!decoder->vm_tm_corr_dry_run)
2525 memcpy((void *)decoder->buf + 1, &host_tsc, 8);
/*
 * Main packet-walk loop for the VM time correlation pass: consumes packets,
 * tracks PGE / PSB / PIP / block state, and dispatches TSC and PEBS TSC
 * values for Guest-to-Host translation.
 * NOTE(review): some case labels, breaks and returns are elided here.
 */
2528 static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
2530 struct intel_pt_vm_tsc_info data = { .psbend = false };
/* If already inside a PSB+, pre-scan it for PIP/VMCS/TMA */
2534 if (decoder->in_psb)
2535 intel_pt_vm_tm_corr_psb(decoder, &data);
2538 err = intel_pt_get_next_packet(decoder);
2539 if (err == -ENOLINK)
2544 switch (decoder->packet.type) {
2545 case INTEL_PT_TIP_PGD:
2546 decoder->pge = false;
2547 decoder->vm_tm_corr_continuous = false;
2552 case INTEL_PT_TIP_PGE:
2553 decoder->pge = true;
/* OVF: PGE afterwards is inferred from whether a FUP follows */
2557 decoder->in_psb = false;
2559 decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
2560 if (pge != decoder->pge)
2561 intel_pt_log("Surprising PGE change in OVF!");
2563 decoder->vm_tm_corr_continuous = false;
/* A FUP inside PSB+ implies trace was enabled */
2567 if (decoder->in_psb)
2568 decoder->pge = true;
2571 case INTEL_PT_TRACESTOP:
2572 decoder->pge = false;
2573 decoder->vm_tm_corr_continuous = false;
2574 decoder->have_tma = false;
2578 intel_pt_vm_tm_corr_psb(decoder, &data);
2582 decoder->pip_payload = decoder->packet.payload;
2586 intel_pt_calc_mtc_timestamp(decoder);
/* Translate the TSC (if Guest) before computing the timestamp from it */
2590 intel_pt_vm_tm_corr_tsc(decoder, &data);
2591 intel_pt_calc_tsc_timestamp(decoder);
2592 decoder->vm_tm_corr_same_buf = true;
2593 decoder->vm_tm_corr_continuous = decoder->pge;
2597 intel_pt_calc_tma(decoder);
2601 intel_pt_calc_cyc_timestamp(decoder);
2605 intel_pt_calc_cbr(decoder);
2608 case INTEL_PT_PSBEND:
2609 decoder->in_psb = false;
2610 data.psbend = false;
2614 if (decoder->packet.payload != NO_VMCS)
2615 decoder->vmcs = decoder->packet.payload;
2619 decoder->blk_type = decoder->packet.payload;
/* PEBS basic block item 2 carries a TSC value to translate */
2623 if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
2624 decoder->packet.count == 2)
2625 intel_pt_vm_tm_corr_pebs_tsc(decoder);
2629 case INTEL_PT_BEP_IP:
2630 decoder->blk_type = 0;
/* Packets with no effect on time correlation */
2633 case INTEL_PT_MODE_EXEC:
2634 case INTEL_PT_MODE_TSX:
2637 case INTEL_PT_PTWRITE_IP:
2638 case INTEL_PT_PTWRITE:
2639 case INTEL_PT_MWAIT:
2641 case INTEL_PT_EXSTOP_IP:
2642 case INTEL_PT_EXSTOP:
2644 case INTEL_PT_BAD: /* Does not happen */
2653 #define HOP_PROCESS 0
2654 #define HOP_IGNORE 1
2655 #define HOP_RETURN 2
2658 static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
2660 /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
/*
 * Returns one of HOP_PROCESS / HOP_IGNORE / HOP_RETURN to tell the caller
 * how to continue; *err carries any error for the HOP_RETURN case.
 * NOTE(review): some case labels and return lines are elided here.
 */
2661 static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
2663 /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
2664 if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
2665 *err = intel_pt_scan_for_psb(decoder);
2670 switch (decoder->packet.type) {
/* TIP.PGD ends trace at the packet ip (if present) */
2674 case INTEL_PT_TIP_PGD:
2675 if (!decoder->packet.count) {
2676 intel_pt_set_nr(decoder);
2679 intel_pt_set_ip(decoder);
2680 decoder->state.type |= INTEL_PT_TRACE_END;
2681 decoder->state.from_ip = 0;
2682 decoder->state.to_ip = decoder->ip;
2683 intel_pt_update_nr(decoder);
/* TIP: emit an instruction sample at the packet ip */
2687 if (!decoder->packet.count) {
2688 intel_pt_set_nr(decoder);
2691 intel_pt_set_ip(decoder);
2692 decoder->state.type = INTEL_PT_INSTRUCTION;
2693 decoder->state.from_ip = decoder->ip;
2694 decoder->state.to_ip = 0;
2695 intel_pt_update_nr(decoder);
/* FUP: take the ip, then handle any associated event / TIP walk */
2699 if (!decoder->packet.count)
2701 intel_pt_set_ip(decoder);
2702 if (intel_pt_fup_event(decoder))
2704 if (!decoder->branch_enable)
2707 decoder->state.type = INTEL_PT_INSTRUCTION;
2708 decoder->state.from_ip = decoder->ip;
2709 decoder->state.to_ip = 0;
2712 *err = intel_pt_walk_fup_tip(decoder);
2714 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
/* PSB: walk PSB+ and emit a PSB event (plus instruction if ip known) */
2718 decoder->state.psb_offset = decoder->pos;
2719 decoder->psb_ip = 0;
2720 decoder->last_ip = 0;
2721 decoder->have_last_ip = true;
2722 *err = intel_pt_walk_psbend(decoder);
2723 if (*err == -EAGAIN)
2727 decoder->state.type = INTEL_PT_PSB_EVT;
2728 if (decoder->psb_ip) {
2729 decoder->state.type |= INTEL_PT_INSTRUCTION;
2730 decoder->ip = decoder->psb_ip;
2732 decoder->state.from_ip = decoder->psb_ip;
2733 decoder->state.to_ip = 0;
/* Packets hop mode leaves for the normal walker or ignores */
2738 case INTEL_PT_TIP_PGE:
2741 case INTEL_PT_MODE_EXEC:
2742 case INTEL_PT_MODE_TSX:
2746 case INTEL_PT_PSBEND:
2748 case INTEL_PT_TRACESTOP:
2752 case INTEL_PT_PTWRITE:
2753 case INTEL_PT_PTWRITE_IP:
2754 case INTEL_PT_EXSTOP:
2755 case INTEL_PT_EXSTOP_IP:
2756 case INTEL_PT_MWAIT:
2762 case INTEL_PT_BEP_IP:
/* Lookahead state for finding the FUP within PSB+ (more fields elided) */
2768 struct intel_pt_psb_info {
2769 struct intel_pt_pkt fup_packet;
2774 /* Lookahead and get the FUP packet from PSB+ */
/*
 * Captures the first FUP with an ip inside PSB+; after PSBEND it keeps
 * scanning a few packets (after_psbend countdown) to ignore the FUP if a
 * TIP.PGE follows.
 * NOTE(review): some case labels and return lines are elided here.
 */
2775 static int intel_pt_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2777 struct intel_pt_psb_info *data = pkt_info->data;
2779 switch (pkt_info->packet.type) {
2784 case INTEL_PT_MODE_EXEC:
2785 case INTEL_PT_MODE_TSX:
2791 if (data->after_psbend) {
2792 data->after_psbend -= 1;
2793 if (!data->after_psbend)
/* FUP: record the first one that carries an ip */
2799 if (data->after_psbend)
2801 if (data->fup || pkt_info->packet.count == 0)
2803 data->fup_packet = pkt_info->packet;
2807 case INTEL_PT_PSBEND:
2810 /* Keep going to check for a TIP.PGE */
2811 data->after_psbend = 6;
2814 case INTEL_PT_TIP_PGE:
2815 /* Ignore FUP in PSB+ if followed by TIP.PGE */
2816 if (data->after_psbend)
2820 case INTEL_PT_PTWRITE:
2821 case INTEL_PT_PTWRITE_IP:
2822 case INTEL_PT_EXSTOP:
2823 case INTEL_PT_EXSTOP_IP:
2824 case INTEL_PT_MWAIT:
2830 case INTEL_PT_BEP_IP:
2831 if (data->after_psbend) {
2832 data->after_psbend -= 1;
2833 if (!data->after_psbend)
/* Packets that terminate the lookahead */
2842 case INTEL_PT_TIP_PGD:
2845 case INTEL_PT_TRACESTOP:
/*
 * Process a PSB+ (synchronization packet sequence): reset the IP-tracking
 * state and the call/return stack, walk to PSBEND, then report a PSB event
 * sample whose from_ip is the FUP IP seen inside the PSB+ (psb_ip), if any.
 * NOTE(review): error-handling lines are elided in this excerpt.
 */
2853 static int intel_pt_psb(struct intel_pt_decoder *decoder)
2857 decoder->last_ip = 0;
2858 decoder->psb_ip = 0;
2859 decoder->have_last_ip = true;
2860 intel_pt_clear_stack(&decoder->stack);
2861 err = intel_pt_walk_psbend(decoder);
2864 decoder->state.type = INTEL_PT_PSB_EVT;
2865 decoder->state.from_ip = decoder->psb_ip;
2866 decoder->state.to_ip = 0;
/*
 * Handle a FUP that was found inside a PSB+: walk the FUP to the target IP
 * if the decoder is not already there, then process the rest of the PSB+.
 * On failure the packet state drops to an error-resync state (ERR3).
 * NOTE(review): some lines (returns/braces) are elided in this excerpt.
 */
2870 static int intel_pt_fup_in_psb(struct intel_pt_decoder *decoder)
2874 if (decoder->ip != decoder->last_ip) {
2875 err = intel_pt_walk_fup(decoder);
2876 if (!err || err != -EAGAIN)
2880 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2881 err = intel_pt_psb(decoder);
2883 decoder->pkt_state = INTEL_PT_STATE_ERR3;
/*
 * Check whether the upcoming PSB+ contains a FUP worth decoding (only when
 * branch tracing is enabled and tracing is on, i.e. pge). If so, adopt that
 * FUP packet as the current packet, set last_ip from it, and process the
 * PSB+ via intel_pt_fup_in_psb(), returning the result through *err.
 * Return value: presumably true when the FUP path was taken — TODO confirm,
 * the return statements are elided in this excerpt.
 */
2890 static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
2892 struct intel_pt_psb_info data = { .fup = false };
2894 if (!decoder->branch_enable || !decoder->pge)
2897 intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
2901 decoder->packet = data.fup_packet;
2902 intel_pt_set_last_ip(decoder);
2903 decoder->pkt_state = INTEL_PT_STATE_FUP_IN_PSB;
2905 *err = intel_pt_fup_in_psb(decoder);
/*
 * Main decode loop while in sync: fetch packets one at a time and dispatch
 * on packet type, updating decoder state (IP, timestamps, power/ptwrite
 * payloads) and returning when a sample is ready or an error occurs.
 * The cyc_threshold logic suppresses IPC sampling unless the previous packet
 * was a CYC. NOTE(review): this excerpt elides many lines (case labels,
 * breaks, returns) — the visible lines are kept byte-identical.
 */
2910 static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
2912 int last_packet_type = INTEL_PT_PAD;
2913 bool no_tip = false;
2917 err = intel_pt_get_next_packet(decoder);
2921 if (decoder->cyc_threshold) {
2922 if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
2923 decoder->sample_cyc = false;
2924 last_packet_type = decoder->packet.type;
/* Hop mode (branch-less decoding) may fully handle the packet here. */
2928 switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
2940 switch (decoder->packet.type) {
2942 if (!decoder->packet.count)
2944 decoder->tnt = decoder->packet;
2945 decoder->pkt_state = INTEL_PT_STATE_TNT;
2946 err = intel_pt_walk_tnt(decoder);
2951 case INTEL_PT_TIP_PGD:
2952 if (decoder->packet.count != 0)
2953 intel_pt_set_last_ip(decoder);
2954 decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
2955 return intel_pt_walk_tip(decoder);
2957 case INTEL_PT_TIP_PGE: {
/* Trace (re)enabled: packet generation enable. */
2958 decoder->pge = true;
2959 intel_pt_mtc_cyc_cnt_pge(decoder);
2960 intel_pt_set_nr(decoder);
2961 if (decoder->packet.count == 0) {
2962 intel_pt_log_at("Skipping zero TIP.PGE",
2966 intel_pt_set_ip(decoder);
2967 decoder->state.from_ip = 0;
2968 decoder->state.to_ip = decoder->ip;
2969 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
2971 * In hop mode, resample to get the to_ip as an
2972 * "instruction" sample.
2975 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
2980 return intel_pt_overflow(decoder);
2983 if (decoder->packet.count != 0)
2984 intel_pt_set_last_ip(decoder);
2985 decoder->pkt_state = INTEL_PT_STATE_TIP;
2986 return intel_pt_walk_tip(decoder);
2989 if (decoder->packet.count == 0) {
2990 intel_pt_log_at("Skipping zero FUP",
2995 intel_pt_set_last_ip(decoder);
2996 if (!decoder->branch_enable) {
2997 decoder->ip = decoder->last_ip;
2998 if (intel_pt_fup_event(decoder))
3003 if (decoder->set_fup_mwait)
3006 decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
3008 decoder->pkt_state = INTEL_PT_STATE_FUP;
3009 err = intel_pt_walk_fup(decoder);
3016 return intel_pt_walk_fup_tip(decoder);
3018 case INTEL_PT_TRACESTOP:
/* TraceStop: tracing is off; clear period/TSX tracking state. */
3019 decoder->pge = false;
3020 decoder->continuous_period = false;
3021 intel_pt_clear_tx_flags(decoder);
3022 decoder->have_tma = false;
3026 decoder->state.psb_offset = decoder->pos;
3027 decoder->psb_ip = 0;
3028 if (intel_pt_psb_with_fup(decoder, &err))
3030 err = intel_pt_psb(decoder);
3036 intel_pt_update_pip(decoder);
3040 intel_pt_calc_mtc_timestamp(decoder);
3041 if (decoder->period_type != INTEL_PT_PERIOD_MTC)
3044 * Ensure that there has been an instruction since the
3047 if (!decoder->mtc_insn)
3049 decoder->mtc_insn = false;
3050 /* Ensure that there is a timestamp */
3051 if (!decoder->timestamp)
3053 decoder->state.type = INTEL_PT_INSTRUCTION;
3054 decoder->state.from_ip = decoder->ip;
3055 decoder->state.to_ip = 0;
3056 decoder->mtc_insn = false;
3060 intel_pt_calc_tsc_timestamp(decoder);
3064 intel_pt_calc_tma(decoder);
3068 intel_pt_calc_cyc_timestamp(decoder);
3072 intel_pt_calc_cbr(decoder);
3073 if (decoder->cbr != decoder->cbr_seen) {
3074 decoder->state.type = 0;
3079 case INTEL_PT_MODE_EXEC:
3080 decoder->exec_mode = decoder->packet.payload;
3083 case INTEL_PT_MODE_TSX:
3084 /* MODE_TSX need not be followed by FUP */
3085 if (!decoder->pge || decoder->in_psb) {
3086 intel_pt_update_in_tx(decoder);
3089 err = intel_pt_mode_tsx(decoder, &no_tip);
3094 case INTEL_PT_BAD: /* Does not happen */
3095 return intel_pt_bug(decoder);
3097 case INTEL_PT_PSBEND:
3103 case INTEL_PT_PTWRITE_IP:
/* PTWRITE with IP: the payload is reported at the FUP target IP. */
3104 decoder->fup_ptw_payload = decoder->packet.payload;
3105 err = intel_pt_get_next_packet(decoder);
3108 if (decoder->packet.type == INTEL_PT_FUP) {
3109 decoder->set_fup_ptw = true;
3112 intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
3117 case INTEL_PT_PTWRITE:
3118 decoder->state.type = INTEL_PT_PTW;
3119 decoder->state.from_ip = decoder->ip;
3120 decoder->state.to_ip = 0;
3121 decoder->state.ptw_payload = decoder->packet.payload;
3124 case INTEL_PT_MWAIT:
3125 decoder->fup_mwait_payload = decoder->packet.payload;
3126 decoder->set_fup_mwait = true;
3130 if (decoder->set_fup_mwait) {
3131 decoder->fup_pwre_payload =
3132 decoder->packet.payload;
3133 decoder->set_fup_pwre = true;
3136 decoder->state.type = INTEL_PT_PWR_ENTRY;
3137 decoder->state.from_ip = decoder->ip;
3138 decoder->state.to_ip = 0;
3139 decoder->state.pwrx_payload = decoder->packet.payload;
3142 case INTEL_PT_EXSTOP_IP:
3143 err = intel_pt_get_next_packet(decoder);
3146 if (decoder->packet.type == INTEL_PT_FUP) {
3147 decoder->set_fup_exstop = true;
3150 intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
3155 case INTEL_PT_EXSTOP:
3156 decoder->state.type = INTEL_PT_EX_STOP;
3157 decoder->state.from_ip = decoder->ip;
3158 decoder->state.to_ip = 0;
3162 decoder->state.type = INTEL_PT_PWR_EXIT;
3163 decoder->state.from_ip = decoder->ip;
3164 decoder->state.to_ip = 0;
3165 decoder->state.pwrx_payload = decoder->packet.payload;
3169 intel_pt_bbp(decoder);
3173 intel_pt_bip(decoder);
3177 decoder->state.type = INTEL_PT_BLK_ITEMS;
3178 decoder->state.from_ip = decoder->ip;
3179 decoder->state.to_ip = 0;
3182 case INTEL_PT_BEP_IP:
3183 err = intel_pt_get_next_packet(decoder);
3186 if (decoder->packet.type == INTEL_PT_FUP) {
3187 decoder->set_fup_bep = true;
3190 intel_pt_log_at("ERROR: Missing FUP after BEP",
3196 return intel_pt_bug(decoder);
/*
 * Return true if the current packet allows the IP to be determined: either
 * the previous IP is known (have_last_ip, needed for compressed IP forms),
 * or the packet carries a count of 3 or 6.
 * NOTE(review): counts 3 and 6 presumably denote the IP compression forms
 * that encode the full address (sign-extended 48-bit / full 64-bit), making
 * a previous IP unnecessary — confirm against the packet decoder.
 */
3201 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
3203 return decoder->packet.count &&
3204 (decoder->have_last_ip || decoder->packet.count == 3 ||
3205 decoder->packet.count == 6);
/*
 * Walk the packets of a PSB+ to (re)gain synchronization: consume packets
 * until PSBEND, setting the IP from a FUP if possible (recorded in psb_ip)
 * and applying timing packets (MTC/TSC/TMA/CYC/CBR). Packets that must not
 * appear inside a PSB+ (TIP*, PTWRITE, EXSTOP, MWAIT, BEP, ...) are errors.
 * in_psb is set for the duration so other code knows PSB+ context applies.
 * NOTE(review): many lines (breaks, gotos, case labels) are elided in this
 * excerpt; the visible lines are kept byte-identical.
 */
3208 /* Walk PSB+ packets to get in sync. */
3209 static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
3213 decoder->in_psb = true;
3216 err = intel_pt_get_next_packet(decoder);
3220 switch (decoder->packet.type) {
3221 case INTEL_PT_TIP_PGD:
3222 decoder->continuous_period = false;
3224 case INTEL_PT_TIP_PGE:
3226 case INTEL_PT_PTWRITE:
3227 case INTEL_PT_PTWRITE_IP:
3228 case INTEL_PT_EXSTOP:
3229 case INTEL_PT_EXSTOP_IP:
3230 case INTEL_PT_MWAIT:
3236 case INTEL_PT_BEP_IP:
3237 intel_pt_log("ERROR: Unexpected packet\n");
3242 decoder->pge = true;
3243 if (intel_pt_have_ip(decoder)) {
3244 uint64_t current_ip = decoder->ip;
3246 intel_pt_set_ip(decoder);
3247 decoder->psb_ip = decoder->ip;
3249 intel_pt_log_to("Setting IP",
3255 intel_pt_calc_mtc_timestamp(decoder);
3259 intel_pt_calc_tsc_timestamp(decoder);
3263 intel_pt_calc_tma(decoder);
3267 intel_pt_calc_cyc_timestamp(decoder);
3271 intel_pt_calc_cbr(decoder);
3275 intel_pt_set_pip(decoder);
3278 case INTEL_PT_MODE_EXEC:
3279 decoder->exec_mode = decoder->packet.payload;
3282 case INTEL_PT_MODE_TSX:
3283 intel_pt_update_in_tx(decoder);
3286 case INTEL_PT_TRACESTOP:
3287 decoder->pge = false;
3288 decoder->continuous_period = false;
3289 intel_pt_clear_tx_flags(decoder);
3293 decoder->have_tma = false;
3294 intel_pt_log("ERROR: Unexpected packet\n");
3296 decoder->pkt_state = INTEL_PT_STATE_ERR4;
3298 decoder->pkt_state = INTEL_PT_STATE_ERR3;
3302 case INTEL_PT_BAD: /* Does not happen */
3303 err = intel_pt_bug(decoder);
3307 err = intel_pt_overflow(decoder);
3310 case INTEL_PT_PSBEND:
3323 decoder->in_psb = false;
/*
 * Scan forward through the packet stream until a full IP can be established
 * (from a TIP/TIP.PGE/TIP.PGD/FUP whose compression form permits it), while
 * still applying timing and mode packets and handling PSB/TraceStop/overflow
 * along the way. Used when the decoder has lost the IP.
 * NOTE(review): lines (breaks, returns, case labels) are elided in this
 * excerpt; the visible lines are kept byte-identical.
 */
3328 static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
3333 err = intel_pt_get_next_packet(decoder);
3337 switch (decoder->packet.type) {
3338 case INTEL_PT_TIP_PGD:
3339 decoder->continuous_period = false;
3340 decoder->pge = false;
3341 if (intel_pt_have_ip(decoder))
3342 intel_pt_set_ip(decoder);
3345 decoder->state.type |= INTEL_PT_TRACE_END;
3348 case INTEL_PT_TIP_PGE:
3349 decoder->pge = true;
3350 intel_pt_mtc_cyc_cnt_pge(decoder);
3351 if (intel_pt_have_ip(decoder))
3352 intel_pt_set_ip(decoder);
3355 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
3359 decoder->pge = true;
3360 if (intel_pt_have_ip(decoder))
3361 intel_pt_set_ip(decoder);
3367 if (intel_pt_have_ip(decoder))
3368 intel_pt_set_ip(decoder);
3374 intel_pt_calc_mtc_timestamp(decoder);
3378 intel_pt_calc_tsc_timestamp(decoder);
3382 intel_pt_calc_tma(decoder);
3386 intel_pt_calc_cyc_timestamp(decoder);
3390 intel_pt_calc_cbr(decoder);
3394 intel_pt_set_pip(decoder);
3397 case INTEL_PT_MODE_EXEC:
3398 decoder->exec_mode = decoder->packet.payload;
3401 case INTEL_PT_MODE_TSX:
3402 intel_pt_update_in_tx(decoder);
3406 return intel_pt_overflow(decoder);
3408 case INTEL_PT_BAD: /* Does not happen */
3409 return intel_pt_bug(decoder);
3411 case INTEL_PT_TRACESTOP:
3412 decoder->pge = false;
3413 decoder->continuous_period = false;
3414 intel_pt_clear_tx_flags(decoder);
3415 decoder->have_tma = false;
3419 decoder->state.psb_offset = decoder->pos;
3420 decoder->psb_ip = 0;
3421 decoder->last_ip = 0;
3422 decoder->have_last_ip = true;
3423 intel_pt_clear_stack(&decoder->stack);
3424 err = intel_pt_walk_psb(decoder);
3427 decoder->state.type = INTEL_PT_PSB_EVT;
3428 decoder->state.from_ip = decoder->psb_ip;
3429 decoder->state.to_ip = 0;
3433 case INTEL_PT_PSBEND:
3437 case INTEL_PT_PTWRITE:
3438 case INTEL_PT_PTWRITE_IP:
3439 case INTEL_PT_EXSTOP:
3440 case INTEL_PT_EXSTOP_IP:
3441 case INTEL_PT_MWAIT:
3447 case INTEL_PT_BEP_IP:
/*
 * Re-synchronize the decoder's IP after an error or overflow: clear all
 * pending FUP-related flags, then either (without branch tracing) go
 * straight back in sync with no sample, or walk the stream until a full IP
 * is found and emit a "Setting IP" transition (from_ip = 0, to_ip = new IP).
 * In hop mode it resamples instead so the new IP becomes an "instruction"
 * sample. NOTE(review): some lines are elided in this excerpt.
 */
3454 static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
3458 decoder->set_fup_tx_flags = false;
3459 decoder->set_fup_ptw = false;
3460 decoder->set_fup_mwait = false;
3461 decoder->set_fup_pwre = false;
3462 decoder->set_fup_exstop = false;
3463 decoder->set_fup_bep = false;
3465 if (!decoder->branch_enable) {
3466 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3467 decoder->overflow = false;
3468 decoder->state.type = 0; /* Do not have a sample */
3472 intel_pt_log("Scanning for full IP\n");
3473 err = intel_pt_walk_to_ip(decoder);
3474 if (err || ((decoder->state.type & INTEL_PT_PSB_EVT) && !decoder->ip))
3477 /* In hop mode, resample to get the to_ip as an "instruction" sample */
3479 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
3481 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3482 decoder->overflow = false;
3484 decoder->state.from_ip = 0;
3485 decoder->state.to_ip = decoder->ip;
3486 intel_pt_log_to("Setting IP", decoder->ip);
/*
 * Detect a PSB packet split across the end of the current buffer: check
 * whether the buffer ends with the first i bytes of the PSB string, for
 * i from INTEL_PT_PSB_LEN-1 down to 1. Returns the matched prefix length
 * (or presumably 0 if none — the tail of the loop is elided in this
 * excerpt).
 */
3491 static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
3493 const unsigned char *end = decoder->buf + decoder->len;
3496 for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
3497 if (i > decoder->len)
3499 if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
/*
 * Given that the previous buffer ended with part_psb bytes of a PSB, check
 * whether the current buffer begins with the remaining PSB bytes. Returns
 * the number of remaining bytes matched (the success return is elided in
 * this excerpt); a mismatch or short buffer fails the check.
 */
3505 static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
3507 size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
3508 const char *psb = INTEL_PT_PSB_STR;
3510 if (rest_psb > decoder->len ||
3511 memcmp(decoder->buf, psb + part_psb, rest_psb))
/*
 * Handle a PSB split across a buffer boundary: advance to the next data
 * buffer, verify it starts with the rest of the PSB, then substitute a
 * temporary buffer containing a whole PSB (temp_buf) and stash the real
 * continuation in next_buf/next_len for use afterwards. decoder->pos is
 * rewound by part_psb so positions reported refer to the PSB start.
 */
3517 static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
3522 decoder->pos += decoder->len;
3525 ret = intel_pt_get_next_data(decoder, false);
3529 rest_psb = intel_pt_rest_psb(decoder, part_psb);
3533 decoder->pos -= part_psb;
3534 decoder->next_buf = decoder->buf + rest_psb;
3535 decoder->next_len = decoder->len - rest_psb;
3536 memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
3537 decoder->buf = decoder->temp_buf;
3538 decoder->len = INTEL_PT_PSB_LEN;
/*
 * Scan the trace data for the next PSB packet, fetching more buffers as
 * needed and coping with a PSB split across buffer boundaries. On finding
 * one, set pkt_step so the next packet fetched is the PSB itself.
 * NOTE(review): loop structure and some returns are elided in this excerpt.
 */
3543 static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
3545 unsigned char *next;
3548 intel_pt_log("Scanning for PSB\n");
3550 if (!decoder->len) {
3551 ret = intel_pt_get_next_data(decoder, false);
3556 next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
3561 part_psb = intel_pt_part_psb(decoder);
3563 ret = intel_pt_get_split_psb(decoder, part_psb);
3567 decoder->pos += decoder->len;
3573 decoder->pkt_step = next - decoder->buf;
3574 return intel_pt_get_next_packet(decoder);
/*
 * Establish initial synchronization: reset IP/period state, find a PSB,
 * then walk the PSB+ to produce the first (PSB-only) sample. In VM time
 * correlation mode it instead enters the dedicated correlation state.
 * After a successful PSB+ walk the decoder is IN_SYNC (or RESAMPLE in hop
 * mode, so the PSB FUP IP is reported as an "instruction" sample).
 * NOTE(review): some error-path lines are elided in this excerpt.
 */
3578 static int intel_pt_sync(struct intel_pt_decoder *decoder)
3582 decoder->pge = false;
3583 decoder->continuous_period = false;
3584 decoder->have_last_ip = false;
3585 decoder->last_ip = 0;
3586 decoder->psb_ip = 0;
3588 intel_pt_clear_stack(&decoder->stack);
3590 err = intel_pt_scan_for_psb(decoder);
3594 if (decoder->vm_time_correlation) {
3595 decoder->in_psb = true;
3596 if (!decoder->timestamp)
3597 decoder->timestamp = 1;
3598 decoder->state.type = 0;
3599 decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
3603 decoder->have_last_ip = true;
3604 decoder->pkt_state = INTEL_PT_STATE_NO_IP;
3606 err = intel_pt_walk_psb(decoder);
3610 decoder->state.type = INTEL_PT_PSB_EVT; /* Only PSB sample */
3611 decoder->state.from_ip = decoder->psb_ip;
3612 decoder->state.to_ip = 0;
3616 * In hop mode, resample to get the PSB FUP ip as an
3617 * "instruction" sample.
3620 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
3622 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
/*
 * Estimate the current timestamp: start from roughly 2 TSC ticks per
 * sampled instruction (sample_insn_cnt << 1), scale by the core-to-bus
 * ratio (max_non_turbo_ratio / cbr) when both are known, and add to the
 * last sample timestamp. Only an estimate — used when no exact timestamp
 * is available.
 */
3628 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
3630 uint64_t est = decoder->sample_insn_cnt << 1;
3632 if (!decoder->cbr || !decoder->max_non_turbo_ratio)
3635 est *= decoder->max_non_turbo_ratio;
3636 est /= decoder->cbr;
3638 return decoder->sample_timestamp + est;
/*
 * Public entry point: decode until the next sample and return a pointer to
 * the decoder's state describing it. Dispatches on pkt_state to the
 * appropriate walker, retrying on -ENOLINK (non-consecutive trace). On
 * error the state records the translated error and current IP; on success
 * it folds in CBR changes, decides whether this sample carries time/IPC
 * (see intel_pt_sample_time()), gives PSB events the TSC timestamp, and
 * publishes nr/timestamp/instruction/cycle counts.
 * NOTE(review): some case labels and the error/ok branch structure are
 * elided in this excerpt; visible lines are kept byte-identical.
 */
3641 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
3646 decoder->state.type = INTEL_PT_BRANCH;
3647 decoder->state.flags = 0;
3649 switch (decoder->pkt_state) {
3650 case INTEL_PT_STATE_NO_PSB:
3651 err = intel_pt_sync(decoder);
3653 case INTEL_PT_STATE_NO_IP:
3654 decoder->have_last_ip = false;
3655 decoder->last_ip = 0;
3658 case INTEL_PT_STATE_ERR_RESYNC:
3659 err = intel_pt_sync_ip(decoder);
3661 case INTEL_PT_STATE_IN_SYNC:
3662 err = intel_pt_walk_trace(decoder);
3664 case INTEL_PT_STATE_TNT:
3665 case INTEL_PT_STATE_TNT_CONT:
3666 err = intel_pt_walk_tnt(decoder);
3668 err = intel_pt_walk_trace(decoder);
3670 case INTEL_PT_STATE_TIP:
3671 case INTEL_PT_STATE_TIP_PGD:
3672 err = intel_pt_walk_tip(decoder);
3674 case INTEL_PT_STATE_FUP:
3675 err = intel_pt_walk_fup(decoder);
3677 err = intel_pt_walk_fup_tip(decoder);
3679 case INTEL_PT_STATE_FUP_NO_TIP:
3680 err = intel_pt_walk_fup(decoder);
3682 err = intel_pt_walk_trace(decoder);
3684 case INTEL_PT_STATE_FUP_IN_PSB:
3685 err = intel_pt_fup_in_psb(decoder);
3687 case INTEL_PT_STATE_RESAMPLE:
3688 err = intel_pt_resample(decoder);
3690 case INTEL_PT_STATE_VM_TIME_CORRELATION:
3691 err = intel_pt_vm_time_correlation(decoder);
3694 err = intel_pt_bug(decoder);
3697 } while (err == -ENOLINK);
3700 decoder->state.err = intel_pt_ext_err(err);
3701 decoder->state.from_ip = decoder->ip;
3702 intel_pt_update_sample_time(decoder);
3703 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
3704 intel_pt_set_nr(decoder);
3706 decoder->state.err = 0;
3707 if (decoder->cbr != decoder->cbr_seen) {
3708 decoder->cbr_seen = decoder->cbr;
3709 if (!decoder->state.type) {
3710 decoder->state.from_ip = decoder->ip;
3711 decoder->state.to_ip = 0;
3713 decoder->state.type |= INTEL_PT_CBR_CHG;
3714 decoder->state.cbr_payload = decoder->cbr_payload;
3715 decoder->state.cbr = decoder->cbr;
3717 if (intel_pt_sample_time(decoder->pkt_state)) {
3718 intel_pt_update_sample_time(decoder);
3719 if (decoder->sample_cyc) {
3720 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
3721 decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
3722 decoder->sample_cyc = false;
3726 * When using only TSC/MTC to compute cycles, IPC can be
3727 * sampled as soon as the cycle count changes.
3729 if (!decoder->have_cyc)
3730 decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
3733 /* Let PSB event always have TSC timestamp */
3734 if ((decoder->state.type & INTEL_PT_PSB_EVT) && decoder->tsc_timestamp)
3735 decoder->sample_timestamp = decoder->tsc_timestamp;
3737 decoder->state.from_nr = decoder->nr;
3738 decoder->state.to_nr = decoder->next_nr;
3739 decoder->nr = decoder->next_nr;
3741 decoder->state.timestamp = decoder->sample_timestamp;
3742 decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
3743 decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
3744 decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
3746 return &decoder->state;
3750 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
3751 * @buf: pointer to buffer pointer
3752 * @len: size of buffer
3754 * Updates the buffer pointer to point to the start of the next PSB packet if
3755 * there is one, otherwise the buffer pointer is unchanged. If @buf is updated,
3756 * @len is adjusted accordingly.
3758 * Return: %true if a PSB packet is found, %false otherwise.
3760 static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
3762 unsigned char *next;
/* memmem() locates the PSB byte pattern; *buf/*len shrink to start at it. */
3764 next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
3766 *len -= next - *buf;
3774 * intel_pt_step_psb - move buffer pointer to the start of the following PSB
3776 * @buf: pointer to buffer pointer
3777 * @len: size of buffer
3779 * Updates the buffer pointer to point to the start of the following PSB packet
3780 * (skipping the PSB at @buf itself) if there is one, otherwise the buffer
3781 * pointer is unchanged. If @buf is updated, @len is adjusted accordingly.
3783 * Return: %true if a PSB packet is found, %false otherwise.
3785 static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
3787 unsigned char *next;
/* Start the search at *buf + 1 so the PSB currently at *buf is skipped. */
3792 next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
3794 *len -= next - *buf;
3802 * intel_pt_last_psb - find the last PSB packet in a buffer.
3804 * @len: size of buffer
3806 * This function finds the last PSB in a buffer.
3808 * Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
3810 static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
3812 const char *n = INTEL_PT_PSB_STR;
3816 if (len < INTEL_PT_PSB_LEN)
/* Search backwards: memrchr() finds candidates for the PSB's first byte, */
/* then memcmp() verifies the remaining INTEL_PT_PSB_LEN-1 bytes. */
3819 k = len - INTEL_PT_PSB_LEN + 1;
3821 p = memrchr(buf, n[0], k);
3824 if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
3833 * intel_pt_next_tsc - find and return next TSC.
3835 * @len: size of buffer
3836 * @tsc: TSC value returned
3837 * @rem: returns remaining size when TSC is found
3839 * Find a TSC packet in @buf and return the TSC value. This function assumes
3840 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
3841 * PSBEND packet is found.
3843 * Return: %true if TSC is found, false otherwise.
3845 static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
3848 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
3849 struct intel_pt_pkt packet;
3853 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
3856 if (packet.type == INTEL_PT_TSC) {
3857 *tsc = packet.payload;
3861 if (packet.type == INTEL_PT_PSBEND)
3870 * intel_pt_tsc_cmp - compare 7-byte TSCs.
3871 * @tsc1: first TSC to compare
3872 * @tsc2: second TSC to compare
3874 * This function compares 7-byte TSC values allowing for the possibility that
3875 * TSC wrapped around. Generally it is not possible to know if TSC has wrapped
3876 * around so for that purpose this function assumes the absolute difference is
3877 * less than half the maximum difference.
3879 * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is
3882 static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
/* halfway = 2^55, i.e. half the 56-bit (7-byte) TSC range. */
3884 const uint64_t halfway = (1ULL << 55);
3890 if (tsc2 - tsc1 < halfway)
3895 if (tsc1 - tsc2 < halfway)
3902 #define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
3905 * adj_for_padding - adjust overlap to account for padding.
3906 * @buf_b: second buffer
3907 * @buf_a: first buffer
3908 * @len_a: size of first buffer
3910 * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
3913 * Return: A pointer into @buf_b from where non-overlapped data starts
3915 static unsigned char *adj_for_padding(unsigned char *buf_b,
3916 unsigned char *buf_a, size_t len_a)
/* Walk the last MAX_PADDING bytes of buf_a against the bytes preceding */
/* buf_b to skip over any alignment padding bytes. */
3918 unsigned char *p = buf_b - MAX_PADDING;
3919 unsigned char *q = buf_a + len_a - MAX_PADDING;
3922 for (i = MAX_PADDING; i; i--, p++, q++) {
3931 * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
3933 * @buf_a: first buffer
3934 * @len_a: size of first buffer
3935 * @buf_b: second buffer
3936 * @len_b: size of second buffer
3937 * @consecutive: returns true if there is data in buf_b that is consecutive
3939 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
3941 * If the trace contains TSC we can look at the last TSC of @buf_a and the
3942 * first TSC of @buf_b in order to determine if the buffers overlap, and then
3943 * walk forward in @buf_b until a later TSC is found. A precondition is that
3944 * @buf_a and @buf_b are positioned at a PSB.
3946 * Return: A pointer into @buf_b from where non-overlapped data starts, or
3947 * @buf_b + @len_b if there is no non-overlapped data.
3949 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
3951 unsigned char *buf_b,
3952 size_t len_b, bool *consecutive,
3955 uint64_t tsc_a, tsc_b;
3957 size_t len, rem_a, rem_b;
3959 p = intel_pt_last_psb(buf_a, len_a);
3961 return buf_b; /* No PSB in buf_a => no overlap */
3963 len = len_a - (p - buf_a);
3964 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
3965 /* The last PSB+ in buf_a is incomplete, so go back one more */
3967 p = intel_pt_last_psb(buf_a, len_a);
3969 return buf_b; /* No full PSB+ => assume no overlap */
3970 len = len_a - (p - buf_a);
3971 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
3972 return buf_b; /* No TSC in buf_a => assume no overlap */
3976 /* Ignore PSB+ with no TSC */
3977 if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
3978 int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
3980 /* Same TSC, so buffers are consecutive */
3981 if (!cmp && rem_b >= rem_a) {
3982 unsigned char *start;
3984 *consecutive = true;
3985 start = buf_b + len_b - (rem_b - rem_a);
3986 return adj_for_padding(start, buf_a, len_a);
3988 if (cmp < 0 && !ooo_tsc)
3989 return buf_b; /* tsc_a < tsc_b => no overlap */
/* Otherwise step to the next PSB in buf_b and compare again. */
3992 if (!intel_pt_step_psb(&buf_b, &len_b))
3993 return buf_b + len_b; /* No PSB in buf_b => no data */
3998 * intel_pt_find_overlap - determine start of non-overlapped trace data.
3999 * @buf_a: first buffer
4000 * @len_a: size of first buffer
4001 * @buf_b: second buffer
4002 * @len_b: size of second buffer
4003 * @have_tsc: can use TSC packets to detect overlap
4004 * @consecutive: returns true if there is data in buf_b that is consecutive
4006 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
4008 * When trace samples or snapshots are recorded there is the possibility that
4009 * the data overlaps. Note that, for the purposes of decoding, data is only
4010 * useful if it begins with a PSB packet.
4012 * Return: A pointer into @buf_b from where non-overlapped data starts, or
4013 * @buf_b + @len_b if there is no non-overlapped data.
4015 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
4016 unsigned char *buf_b, size_t len_b,
4017 bool have_tsc, bool *consecutive,
4020 unsigned char *found;
4022 /* Buffer 'b' must start at PSB so throw away everything before that */
4023 if (!intel_pt_next_psb(&buf_b, &len_b))
4024 return buf_b + len_b; /* No PSB */
4026 if (!intel_pt_next_psb(&buf_a, &len_a))
4027 return buf_b; /* No overlap */
/* With TSC available, delegate to the TSC-based overlap detection. */
4030 found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
4031 consecutive, ooo_tsc);
4037 * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
4038 * we can ignore the first part of buffer 'a'.
4040 while (len_b < len_a) {
4041 if (!intel_pt_step_psb(&buf_a, &len_a))
4042 return buf_b; /* No overlap */
4045 /* Now len_b >= len_a */
4047 /* Potential overlap so check the bytes */
4048 found = memmem(buf_a, len_a, buf_b, len_a);
4050 *consecutive = true;
4051 return adj_for_padding(buf_b + len_a, buf_a, len_a);
4054 /* Try again at next PSB in buffer 'a' */
4055 if (!intel_pt_step_psb(&buf_a, &len_a))
4056 return buf_b; /* No overlap */
4061 * struct fast_forward_data - data used by intel_pt_ff_cb().
4062 * @timestamp: timestamp to fast forward towards
4063 * @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
4064 * the fast forward timestamp.
4066 struct fast_forward_data {
4068 uint64_t buf_timestamp;
4072 * intel_pt_ff_cb - fast forward lookahead callback.
4073 * @buffer: Intel PT trace buffer
4074 * @data: opaque pointer to fast forward data (struct fast_forward_data)
4076 * Determine if @buffer trace is past the fast forward timestamp.
4078 * Return: 1 (stop lookahead) if @buffer trace is past the fast forward
4079 * timestamp, and 0 otherwise.
4081 static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
4083 struct fast_forward_data *d = data;
4089 buf = (unsigned char *)buffer->buf;
/* Need a PSB with a TSC to determine the buffer's first timestamp. */
4092 if (!intel_pt_next_psb(&buf, &len) ||
4093 !intel_pt_next_tsc(buf, len, &tsc, &rem))
/* Widen the 7-byte TSC to 8 bytes using the buffer reference timestamp. */
4096 tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);
4098 intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
4099 tsc, buffer->ref_timestamp);
4102 * If the buffer contains a timestamp earlier that the fast forward
4103 * timestamp, then record it, else stop.
4105 if (tsc < d->timestamp)
4106 d->buf_timestamp = buffer->ref_timestamp;
4114 * intel_pt_fast_forward - reposition decoder forwards.
4115 * @decoder: Intel PT decoder
4116 * @timestamp: timestamp to fast forward towards
4118 * Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
4120 * Return: 0 on success or negative error code on failure.
4122 int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
4124 struct fast_forward_data d = { .timestamp = timestamp };
4129 intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);
4131 /* Find buffer timestamp of buffer to fast forward to */
4132 err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
4136 /* Walk to buffer with same buffer timestamp */
4137 if (d.buf_timestamp) {
4139 decoder->pos += decoder->len;
4141 err = intel_pt_get_next_data(decoder, true);
4142 /* -ENOLINK means non-consecutive trace */
4143 if (err && err != -ENOLINK)
4145 } while (decoder->buf_timestamp != d.buf_timestamp);
4151 buf = (unsigned char *)decoder->buf;
4154 if (!intel_pt_next_psb(&buf, &len))
4158 * Walk PSBs while the PSB timestamp is less than the fast forward
4165 if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
4167 tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
4169 * A TSC packet can slip past MTC packets but, after fast
4170 * forward, decoding starts at the TSC timestamp. That means
4171 * the timestamps may not be exactly the same as the timestamps
4172 * that would have been decoded without fast forward.
4174 if (tsc < timestamp) {
4175 intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
4176 decoder->pos += decoder->len - len;
4179 intel_pt_reposition(decoder);
4183 } while (intel_pt_step_psb(&buf, &len));