1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015-2018 Linaro Limited.
5 * Author: Tor Jeremiassen <tor@ti.com>
6 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
9 #include <linux/bitops.h>
10 #include <linux/coresight-pmu.h>
11 #include <linux/err.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
17 #include <opencsd/ocsd_if_types.h>
23 #include "cs-etm-decoder/cs-etm-decoder.h"
32 #include "map_symbol.h"
37 #include "thread-stack.h"
38 #include <tools/libc_compat.h>
39 #include "util/synthetic-events.h"
41 struct cs_etm_auxtrace {
42 struct auxtrace auxtrace;
43 struct auxtrace_queues queues;
44 struct auxtrace_heap heap;
45 struct itrace_synth_opts synth_opts;
46 struct perf_session *session;
47 struct machine *machine;
48 struct thread *unknown_thread;
54 u8 sample_instructions;
57 u64 latest_kernel_timestamp;
59 u64 branches_sample_type;
61 u64 instructions_sample_type;
62 u64 instructions_sample_period;
65 unsigned int pmu_type;
68 struct cs_etm_traceid_queue {
71 u64 period_instructions;
72 size_t last_branch_pos;
73 union perf_event *event_buf;
74 struct thread *thread;
75 struct branch_stack *last_branch;
76 struct branch_stack *last_branch_rb;
77 struct cs_etm_packet *prev_packet;
78 struct cs_etm_packet *packet;
79 struct cs_etm_packet_queue packet_queue;
83 struct cs_etm_auxtrace *etm;
84 struct cs_etm_decoder *decoder;
85 struct auxtrace_buffer *buffer;
86 unsigned int queue_nr;
87 u8 pending_timestamp_chan_id;
89 const unsigned char *buf;
90 size_t buf_len, buf_used;
91 /* Conversion between traceID and index in traceid_queues array */
92 struct intlist *traceid_queues_list;
93 struct cs_etm_traceid_queue **traceid_queues;
96 /* RB tree for quick conversion between traceID and metadata pointers */
97 static struct intlist *traceid_list;
99 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
100 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
102 static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
103 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
105 /* PTMs ETMIDR [11:8] set to b0011 */
106 #define ETMIDR_PTM_VERSION 0x00000300
109 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
110 * work with. One option is to modify the auxtrace_heap_XYZ() API or simply
111 * encode the etm queue number as the upper 16 bit and the channel as
114 #define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
115 (queue_nr << 16 | trace_chan_id)
116 #define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
117 #define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
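/*
 * Illustrative example of the encoding above (queue and channel values
 * are hypothetical): with queue_nr = 2 and trace_chan_id = 0x10,
 *
 *	TO_CS_QUEUE_NR(2, 0x10)   == 0x20010
 *	TO_QUEUE_NR(0x20010)      == 2
 *	TO_TRACE_CHAN_ID(0x20010) == 0x10
 *
 * so both values can be packed into the queue number handed to
 * auxtrace_heap__add() and recovered losslessly later on.
 */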
119 static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
121 etmidr &= ETMIDR_PTM_VERSION;
123 if (etmidr == ETMIDR_PTM_VERSION)
124 return CS_ETM_PROTO_PTM;
126 return CS_ETM_PROTO_ETMV3;
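/*
 * Worked example (ETMIDR values are hypothetical and only the [11:8]
 * field matters here): 0x00000310 has bits [11:8] == 0b0011, so this
 * function returns CS_ETM_PROTO_PTM; 0x00000210 (bits [11:8] == 0b0010)
 * returns CS_ETM_PROTO_ETMV3.
 */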
129 static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
131 struct int_node *inode;
134 inode = intlist__find(traceid_list, trace_chan_id);
138 metadata = inode->priv;
139 *magic = metadata[CS_ETM_MAGIC];
143 int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
145 struct int_node *inode;
148 inode = intlist__find(traceid_list, trace_chan_id);
152 metadata = inode->priv;
153 *cpu = (int)metadata[CS_ETM_CPU];
158 * The returned PID format is indicated by two bits:
160 * Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
161 * Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
163 * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
164 * are enabled at the same time when the session runs on an EL2 kernel.
165 * This means that both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be
166 * recorded in the trace data; in that case the tool will use
167 * CONTEXTIDR_EL2 as the PID.
169 int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
171 struct int_node *inode;
174 inode = intlist__find(traceid_list, trace_chan_id);
178 metadata = inode->priv;
180 if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
181 val = metadata[CS_ETM_ETMCR];
182 /* CONTEXTIDR is traced */
183 if (val & BIT(ETM_OPT_CTXTID))
184 *pid_fmt = BIT(ETM_OPT_CTXTID);
186 val = metadata[CS_ETMV4_TRCCONFIGR];
187 /* CONTEXTIDR_EL2 is traced */
188 if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
189 *pid_fmt = BIT(ETM_OPT_CTXTID2);
190 /* CONTEXTIDR_EL1 is traced */
191 else if (val & BIT(ETM4_CFG_BIT_CTXTID))
192 *pid_fmt = BIT(ETM_OPT_CTXTID);
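/*
 * Example of interpreting the result (register values are hypothetical):
 * for an ETMv4 trace where TRCCONFIGR has ETM4_CFG_BIT_CTXTID set but
 * neither VMID bit, *pid_fmt is BIT(ETM_OPT_CTXTID) and the PID comes
 * from CONTEXTIDR_EL1; if either VMID bit is set, *pid_fmt is
 * BIT(ETM_OPT_CTXTID2) and CONTEXTIDR_EL2 is used instead.
 */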
198 void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
202 * When a timestamp packet is encountered the backend code
203 * is stopped so that the front end has time to process packets
204 * that were accumulated in the traceID queue. Since there can
205 * be more than one channel per cs_etm_queue, we need to specify
206 * what traceID queue needs servicing.
208 etmq->pending_timestamp_chan_id = trace_chan_id;
211 static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
214 struct cs_etm_packet_queue *packet_queue;
216 if (!etmq->pending_timestamp_chan_id)
220 *trace_chan_id = etmq->pending_timestamp_chan_id;
222 packet_queue = cs_etm__etmq_get_packet_queue(etmq,
223 etmq->pending_timestamp_chan_id);
227 /* Acknowledge pending status */
228 etmq->pending_timestamp_chan_id = 0;
230 /* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
231 return packet_queue->cs_timestamp;
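/*
 * The two functions above form a small hand-off (summary sketch, not
 * extra functionality): the decoder callback records the traceID of the
 * queue that hit a timestamp packet via
 * cs_etm__etmq_set_traceid_queue_timestamp(), decoding stops, and the
 * front end then calls cs_etm__etmq_get_timestamp() to find out which
 * traceID queue needs servicing, fetch its cs_timestamp and clear the
 * pending state before decoding resumes.
 */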
234 static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
240 queue->packet_count = 0;
241 for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
242 queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
243 queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
244 queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
245 queue->packet_buffer[i].instr_count = 0;
246 queue->packet_buffer[i].last_instr_taken_branch = false;
247 queue->packet_buffer[i].last_instr_size = 0;
248 queue->packet_buffer[i].last_instr_type = 0;
249 queue->packet_buffer[i].last_instr_subtype = 0;
250 queue->packet_buffer[i].last_instr_cond = 0;
251 queue->packet_buffer[i].flags = 0;
252 queue->packet_buffer[i].exception_number = UINT32_MAX;
253 queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
254 queue->packet_buffer[i].cpu = INT_MIN;
258 static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
261 struct int_node *inode;
262 struct cs_etm_traceid_queue *tidq;
263 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
265 intlist__for_each_entry(inode, traceid_queues_list) {
266 idx = (int)(intptr_t)inode->priv;
267 tidq = etmq->traceid_queues[idx];
268 cs_etm__clear_packet_queue(&tidq->packet_queue);
272 static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
273 struct cs_etm_traceid_queue *tidq,
277 struct auxtrace_queue *queue;
278 struct cs_etm_auxtrace *etm = etmq->etm;
280 cs_etm__clear_packet_queue(&tidq->packet_queue);
282 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
283 tidq->tid = queue->tid;
285 tidq->trace_chan_id = trace_chan_id;
287 tidq->packet = zalloc(sizeof(struct cs_etm_packet));
291 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
292 if (!tidq->prev_packet)
295 if (etm->synth_opts.last_branch) {
296 size_t sz = sizeof(struct branch_stack);
298 sz += etm->synth_opts.last_branch_sz *
299 sizeof(struct branch_entry);
300 tidq->last_branch = zalloc(sz);
301 if (!tidq->last_branch)
303 tidq->last_branch_rb = zalloc(sz);
304 if (!tidq->last_branch_rb)
308 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
309 if (!tidq->event_buf)
315 zfree(&tidq->last_branch_rb);
316 zfree(&tidq->last_branch);
317 zfree(&tidq->prev_packet);
318 zfree(&tidq->packet);
323 static struct cs_etm_traceid_queue
324 *cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
327 struct int_node *inode;
328 struct intlist *traceid_queues_list;
329 struct cs_etm_traceid_queue *tidq, **traceid_queues;
330 struct cs_etm_auxtrace *etm = etmq->etm;
332 if (etm->timeless_decoding)
333 trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
335 traceid_queues_list = etmq->traceid_queues_list;
338 * Check if the traceid_queue exist for this traceID by looking
341 inode = intlist__find(traceid_queues_list, trace_chan_id);
343 idx = (int)(intptr_t)inode->priv;
344 return etmq->traceid_queues[idx];
347 /* We couldn't find a traceid_queue for this traceID, allocate one */
348 tidq = malloc(sizeof(*tidq));
352 memset(tidq, 0, sizeof(*tidq));
354 /* Get a valid index for the new traceid_queue */
355 idx = intlist__nr_entries(traceid_queues_list);
356 /* Memory for the inode is freed in cs_etm__free_traceid_queues() */
357 inode = intlist__findnew(traceid_queues_list, trace_chan_id);
361 /* Associate this traceID with this index */
362 inode->priv = (void *)(intptr_t)idx;
364 if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
367 /* Grow the traceid_queues array by one unit */
368 traceid_queues = etmq->traceid_queues;
369 traceid_queues = reallocarray(traceid_queues,
371 sizeof(*traceid_queues));
374 * On failure reallocarray() returns NULL and the original block of
375 * memory is left untouched.
380 traceid_queues[idx] = tidq;
381 etmq->traceid_queues = traceid_queues;
383 return etmq->traceid_queues[idx];
387 * Function intlist__remove() removes the inode from the list
388 * and deletes the memory associated with it.
390 intlist__remove(traceid_queues_list, inode);
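/*
 * Example of the mapping maintained above (traceIDs are hypothetical):
 * if traceID 0x10 is seen first and 0x12 second, the intlist stores
 * 0x10 -> 0 and 0x12 -> 1 while etmq->traceid_queues[] grows to two
 * entries, so either traceID resolves to its cs_etm_traceid_queue with
 * one intlist__find() plus an array lookup.
 */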
396 struct cs_etm_packet_queue
397 *cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
399 struct cs_etm_traceid_queue *tidq;
401 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
403 return &tidq->packet_queue;
408 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
409 struct cs_etm_traceid_queue *tidq)
411 struct cs_etm_packet *tmp;
413 if (etm->sample_branches || etm->synth_opts.last_branch ||
414 etm->sample_instructions) {
416 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
417 * the next incoming packet.
420 tidq->packet = tidq->prev_packet;
421 tidq->prev_packet = tmp;
425 static void cs_etm__packet_dump(const char *pkt_string)
427 const char *color = PERF_COLOR_BLUE;
428 int len = strlen(pkt_string);
430 if (len && (pkt_string[len-1] == '\n'))
431 color_fprintf(stdout, color, " %s", pkt_string);
433 color_fprintf(stdout, color, " %s\n", pkt_string);
438 static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
439 struct cs_etm_auxtrace *etm, int idx,
442 u64 **metadata = etm->metadata;
444 t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
445 t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
446 t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
449 static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
450 struct cs_etm_auxtrace *etm, int idx)
452 u64 **metadata = etm->metadata;
454 t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
455 t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
456 t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
457 t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
458 t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
459 t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
460 t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
463 static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
464 struct cs_etm_auxtrace *etm, int idx)
466 u64 **metadata = etm->metadata;
468 t_params[idx].protocol = CS_ETM_PROTO_ETE;
469 t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
470 t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
471 t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
472 t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
473 t_params[idx].ete.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
474 t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
475 t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
478 static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
479 struct cs_etm_auxtrace *etm,
486 for (i = 0; i < decoders; i++) {
487 architecture = etm->metadata[i][CS_ETM_MAGIC];
489 switch (architecture) {
490 case __perf_cs_etmv3_magic:
491 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
492 cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
494 case __perf_cs_etmv4_magic:
495 cs_etm__set_trace_param_etmv4(t_params, etm, i);
497 case __perf_cs_ete_magic:
498 cs_etm__set_trace_param_ete(t_params, etm, i);
508 static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
509 struct cs_etm_queue *etmq,
510 enum cs_etm_decoder_operation mode,
515 if (!(mode < CS_ETM_OPERATION_MAX))
518 d_params->packet_printer = cs_etm__packet_dump;
519 d_params->operation = mode;
520 d_params->data = etmq;
521 d_params->formatted = formatted;
522 d_params->fsyncs = false;
523 d_params->hsyncs = false;
524 d_params->frame_aligned = true;
531 static void cs_etm__dump_event(struct cs_etm_queue *etmq,
532 struct auxtrace_buffer *buffer)
535 const char *color = PERF_COLOR_BLUE;
536 size_t buffer_used = 0;
538 fprintf(stdout, "\n");
539 color_fprintf(stdout, color,
540 ". ... CoreSight %s Trace data: size %#zx bytes\n",
541 cs_etm_decoder__get_name(etmq->decoder), buffer->size);
546 ret = cs_etm_decoder__process_data_block(
547 etmq->decoder, buffer->offset,
548 &((u8 *)buffer->data)[buffer_used],
549 buffer->size - buffer_used, &consumed);
553 buffer_used += consumed;
554 } while (buffer_used < buffer->size);
556 cs_etm_decoder__reset(etmq->decoder);
559 static int cs_etm__flush_events(struct perf_session *session,
560 struct perf_tool *tool)
562 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
563 struct cs_etm_auxtrace,
568 if (!tool->ordered_events)
571 if (etm->timeless_decoding)
572 return cs_etm__process_timeless_queues(etm, -1);
574 return cs_etm__process_queues(etm);
577 static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
581 struct int_node *inode, *tmp;
582 struct cs_etm_traceid_queue *tidq;
583 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
585 intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
586 priv = (uintptr_t)inode->priv;
589 /* Free this traceid_queue from the array */
590 tidq = etmq->traceid_queues[idx];
591 thread__zput(tidq->thread);
592 zfree(&tidq->event_buf);
593 zfree(&tidq->last_branch);
594 zfree(&tidq->last_branch_rb);
595 zfree(&tidq->prev_packet);
596 zfree(&tidq->packet);
600 * Function intlist__remove() removes the inode from the list
601 * and deletes the memory associated with it.
603 intlist__remove(traceid_queues_list, inode);
606 /* Then the RB tree itself */
607 intlist__delete(traceid_queues_list);
608 etmq->traceid_queues_list = NULL;
610 /* finally free the traceid_queues array */
611 zfree(&etmq->traceid_queues);
614 static void cs_etm__free_queue(void *priv)
616 struct cs_etm_queue *etmq = priv;
621 cs_etm_decoder__free(etmq->decoder);
622 cs_etm__free_traceid_queues(etmq);
626 static void cs_etm__free_events(struct perf_session *session)
629 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
630 struct cs_etm_auxtrace,
632 struct auxtrace_queues *queues = &aux->queues;
634 for (i = 0; i < queues->nr_queues; i++) {
635 cs_etm__free_queue(queues->queue_array[i].priv);
636 queues->queue_array[i].priv = NULL;
639 auxtrace_queues__free(queues);
642 static void cs_etm__free(struct perf_session *session)
645 struct int_node *inode, *tmp;
646 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
647 struct cs_etm_auxtrace,
649 cs_etm__free_events(session);
650 session->auxtrace = NULL;
652 /* First remove all traceID/metadata nodes from the RB tree */
653 intlist__for_each_entry_safe(inode, tmp, traceid_list)
654 intlist__remove(traceid_list, inode);
655 /* Then the RB tree itself */
656 intlist__delete(traceid_list);
658 for (i = 0; i < aux->num_cpu; i++)
659 zfree(&aux->metadata[i]);
661 thread__zput(aux->unknown_thread);
662 zfree(&aux->metadata);
666 static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
669 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
670 struct cs_etm_auxtrace,
673 return evsel->core.attr.type == aux->pmu_type;
676 static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
678 struct machine *machine;
680 machine = etmq->etm->machine;
682 if (address >= machine__kernel_start(machine)) {
683 if (machine__is_host(machine))
684 return PERF_RECORD_MISC_KERNEL;
686 return PERF_RECORD_MISC_GUEST_KERNEL;
688 if (machine__is_host(machine))
689 return PERF_RECORD_MISC_USER;
691 return PERF_RECORD_MISC_GUEST_USER;
693 return PERF_RECORD_MISC_HYPERVISOR;
697 static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
698 u64 address, size_t size, u8 *buffer)
703 struct thread *thread;
704 struct machine *machine;
705 struct addr_location al;
706 struct cs_etm_traceid_queue *tidq;
711 machine = etmq->etm->machine;
712 cpumode = cs_etm__cpu_mode(etmq, address);
713 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
717 thread = tidq->thread;
719 if (cpumode != PERF_RECORD_MISC_KERNEL)
721 thread = etmq->etm->unknown_thread;
724 if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
727 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
728 dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
731 offset = al.map->map_ip(al.map, address);
735 len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
738 ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
739 " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
740 if (!al.map->dso->auxtrace_warned) {
741 pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
743 al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
744 al.map->dso->auxtrace_warned = true;
752 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
755 struct cs_etm_decoder_params d_params;
756 struct cs_etm_trace_params *t_params = NULL;
757 struct cs_etm_queue *etmq;
759 * Each queue can only contain data from one CPU when unformatted, so only one decoder is needed.
762 int decoders = formatted ? etm->num_cpu : 1;
764 etmq = zalloc(sizeof(*etmq));
768 etmq->traceid_queues_list = intlist__new(NULL);
769 if (!etmq->traceid_queues_list)
772 /* Use metadata to fill in trace parameters for trace decoder */
773 t_params = zalloc(sizeof(*t_params) * decoders);
778 if (cs_etm__init_trace_params(t_params, etm, decoders))
781 /* Set decoder parameters to decode trace packets */
782 if (cs_etm__init_decoder_params(&d_params, etmq,
783 dump_trace ? CS_ETM_OPERATION_PRINT :
784 CS_ETM_OPERATION_DECODE,
788 etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
795 * Register a function to handle all memory accesses required by
796 * the trace decoder library.
798 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
801 goto out_free_decoder;
807 cs_etm_decoder__free(etmq->decoder);
809 intlist__delete(etmq->traceid_queues_list);
815 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
816 struct auxtrace_queue *queue,
817 unsigned int queue_nr,
820 struct cs_etm_queue *etmq = queue->priv;
822 if (list_empty(&queue->head) || etmq)
825 etmq = cs_etm__alloc_queue(etm, formatted);
832 etmq->queue_nr = queue_nr;
838 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
839 struct cs_etm_queue *etmq,
840 unsigned int queue_nr)
843 unsigned int cs_queue_nr;
848 * We are under a CPU-wide trace scenario. As such we need to know
849 * when the code that generated the traces started to execute so that
850 * it can be correlated with execution on other CPUs. So we get a
851 * handle on the beginning of traces and decode until we find a
852 * timestamp. The timestamp is then added to the auxtrace min heap
853 * in order to know what nibble (of all the etmqs) to decode first.
857 * Fetch an aux_buffer from this etmq. Bail if no more
858 * blocks or an error has been encountered.
860 ret = cs_etm__get_data_block(etmq);
865 * Run decoder on the trace block. The decoder will stop when
866 * encountering a CS timestamp, a full packet queue or the end of
867 * trace for that block.
869 ret = cs_etm__decode_data_block(etmq);
874 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
875 * the timestamp calculation for us.
877 cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
879 /* We found a timestamp, no need to continue. */
884 * We didn't find a timestamp so empty all the traceid packet
885 * queues before looking for another timestamp packet, either
886 * in the current data block or a new one. Packets that were
887 * just decoded are useless since no timestamp has been
888 * associated with them. As such simply discard them.
890 cs_etm__clear_all_packet_queues(etmq);
894 * We have a timestamp. Add it to the min heap to reflect when
895 * instructions conveyed by the range packets of this traceID queue
896 * started to execute. Once the same has been done for all the traceID
897 * queues of each etmq, rendering and decoding can start in
898 * chronological order.
900 * Note that packets decoded above are still in the traceID's packet
901 * queue and will be processed in cs_etm__process_queues().
903 cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
904 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
910 void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
911 struct cs_etm_traceid_queue *tidq)
913 struct branch_stack *bs_src = tidq->last_branch_rb;
914 struct branch_stack *bs_dst = tidq->last_branch;
918 * Set the number of records before early exit: ->nr is used to
919 * determine how many branches to copy from ->entries.
921 bs_dst->nr = bs_src->nr;
924 * Early exit when there is nothing to copy.
930 * As bs_src->entries is a circular buffer, we need to copy from it in
931 * two steps. First, copy the branches from the most recently inserted
932 * branch at ->last_branch_pos to the end of the bs_src->entries buffer.
934 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
935 memcpy(&bs_dst->entries[0],
936 &bs_src->entries[tidq->last_branch_pos],
937 sizeof(struct branch_entry) * nr);
940 * If we wrapped around at least once, the branches from the beginning
941 * of the bs_src->entries buffer up to the ->last_branch_pos element
942 * are older valid branches: copy them over. The total number of
943 * branches copied over will equal the number of branches requested by
944 * the user in last_branch_sz.
946 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
947 memcpy(&bs_dst->entries[nr],
949 sizeof(struct branch_entry) * tidq->last_branch_pos);
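/*
 * Worked example of the two-step copy above (numbers are hypothetical):
 * with last_branch_sz = 4, last_branch_pos = 1 and bs_src->nr = 4, step
 * one copies bs_src->entries[1..3] (the three most recent branches,
 * newest at index 1) to bs_dst->entries[0..2]; step two copies
 * bs_src->entries[0] (the oldest surviving branch) to bs_dst->entries[3],
 * leaving the four requested branches in reverse chronological order.
 */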
954 void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
956 tidq->last_branch_pos = 0;
957 tidq->last_branch_rb->nr = 0;
960 static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
961 u8 trace_chan_id, u64 addr)
965 cs_etm__mem_access(etmq, trace_chan_id, addr,
966 ARRAY_SIZE(instrBytes), instrBytes);
968 * T32 instruction size is indicated by bits[15:11] of the first
969 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
970 * denote a 32-bit instruction.
972 return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
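/*
 * Worked example (encodings per the Arm ARM; instruction bytes are read
 * little-endian, so instrBytes[1] is the top byte of the first halfword):
 * a 32-bit T32 instruction such as BL starts with 0b11110, giving
 * instrBytes[1] in the range 0xF0-0xF7 and a return value of 4; a 16-bit
 * instruction such as "MOVS r0, #1" (0x2001) gives instrBytes[1] = 0x20
 * and a return value of 2.
 */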
975 static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
977 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
978 if (packet->sample_type == CS_ETM_DISCONTINUITY)
981 return packet->start_addr;
985 u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
987 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
988 if (packet->sample_type == CS_ETM_DISCONTINUITY)
991 return packet->end_addr - packet->last_instr_size;
994 static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
996 const struct cs_etm_packet *packet,
999 if (packet->isa == CS_ETM_ISA_T32) {
1000 u64 addr = packet->start_addr;
1003 addr += cs_etm__t32_instr_size(etmq,
1004 trace_chan_id, addr);
1010 /* Assume a 4 byte instruction size (A32/A64) */
1011 return packet->start_addr + offset * 4;
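/*
 * Example (packet contents are hypothetical): for an A64 range packet
 * with start_addr = 0xffff000010000000, the instruction at offset 3 is
 * at 0xffff000010000000 + 3 * 4 = 0xffff00001000000c. The T32 walk
 * above is needed because 16-bit and 32-bit instructions can be mixed.
 */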
1014 static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
1015 struct cs_etm_traceid_queue *tidq)
1017 struct branch_stack *bs = tidq->last_branch_rb;
1018 struct branch_entry *be;
1021 * The branches are recorded in a circular buffer in reverse
1022 * chronological order: we start recording from the last element of the
1023 * buffer down. After writing the first element of the stack, move the
1024 * insert position back to the end of the buffer.
1026 if (!tidq->last_branch_pos)
1027 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1029 tidq->last_branch_pos -= 1;
1031 be = &bs->entries[tidq->last_branch_pos];
1032 be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1033 be->to = cs_etm__first_executed_instr(tidq->packet);
1034 /* No support for mispredict */
1035 be->flags.mispred = 0;
1036 be->flags.predicted = 1;
1039 * Increment bs->nr until reaching the number of last branches requested by
1040 * the user on the command line.
1042 if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
1046 static int cs_etm__inject_event(union perf_event *event,
1047 struct perf_sample *sample, u64 type)
1049 event->header.size = perf_event__sample_event_size(sample, type, 0);
1050 return perf_event__synthesize_sample(event, type, 0, sample);
1055 cs_etm__get_trace(struct cs_etm_queue *etmq)
1057 struct auxtrace_buffer *aux_buffer = etmq->buffer;
1058 struct auxtrace_buffer *old_buffer = aux_buffer;
1059 struct auxtrace_queue *queue;
1061 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1063 aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
1065 /* If no more data, drop the previous auxtrace_buffer and return */
1068 auxtrace_buffer__drop_data(old_buffer);
1073 etmq->buffer = aux_buffer;
1075 /* If the aux_buffer doesn't have data associated, try to load it */
1076 if (!aux_buffer->data) {
1077 /* get the file desc associated with the perf data file */
1078 int fd = perf_data__fd(etmq->etm->session->data);
1080 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
1081 if (!aux_buffer->data)
1085 /* If valid, drop the previous buffer */
1087 auxtrace_buffer__drop_data(old_buffer);
1090 etmq->buf_len = aux_buffer->size;
1091 etmq->buf = aux_buffer->data;
1093 return etmq->buf_len;
1096 static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
1097 struct cs_etm_traceid_queue *tidq)
1099 if ((!tidq->thread) && (tidq->tid != -1))
1100 tidq->thread = machine__find_thread(etm->machine, -1,
1104 tidq->pid = tidq->thread->pid_;
1107 int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
1108 pid_t tid, u8 trace_chan_id)
1110 int cpu, err = -EINVAL;
1111 struct cs_etm_auxtrace *etm = etmq->etm;
1112 struct cs_etm_traceid_queue *tidq;
1114 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1118 if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
1121 err = machine__set_current_tid(etm->machine, cpu, tid, tid);
1126 thread__zput(tidq->thread);
1128 cs_etm__set_pid_tid_cpu(etm, tidq);
1132 bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
1134 return !!etmq->etm->timeless_decoding;
1137 static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
1139 const struct cs_etm_packet *packet,
1140 struct perf_sample *sample)
1143 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
1144 * packet, so directly bail out with 'insn_len' = 0.
1146 if (packet->sample_type == CS_ETM_DISCONTINUITY) {
1147 sample->insn_len = 0;
1152 * T32 instruction size might be 32-bit or 16-bit, decide by calling
1153 * cs_etm__t32_instr_size().
1155 if (packet->isa == CS_ETM_ISA_T32)
1156 sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
1158 /* Otherwise, the A64 and A32 instruction size is always 32-bit. */
1160 sample->insn_len = 4;
1162 cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
1163 sample->insn_len, (void *)sample->insn);
1166 static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
1167 struct cs_etm_traceid_queue *tidq,
1168 u64 addr, u64 period)
1171 struct cs_etm_auxtrace *etm = etmq->etm;
1172 union perf_event *event = tidq->event_buf;
1173 struct perf_sample sample = {.ip = 0,};
1175 event->sample.header.type = PERF_RECORD_SAMPLE;
1176 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
1177 event->sample.header.size = sizeof(struct perf_event_header);
1179 if (!etm->timeless_decoding)
1180 sample.time = etm->latest_kernel_timestamp;
1182 sample.pid = tidq->pid;
1183 sample.tid = tidq->tid;
1184 sample.id = etmq->etm->instructions_id;
1185 sample.stream_id = etmq->etm->instructions_id;
1186 sample.period = period;
1187 sample.cpu = tidq->packet->cpu;
1188 sample.flags = tidq->prev_packet->flags;
1189 sample.cpumode = event->sample.header.misc;
1191 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1193 if (etm->synth_opts.last_branch)
1194 sample.branch_stack = tidq->last_branch;
1196 if (etm->synth_opts.inject) {
1197 ret = cs_etm__inject_event(event, &sample,
1198 etm->instructions_sample_type);
1203 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1207 "CS ETM Trace: failed to deliver instruction event, error %d\n",
1214 * The CS ETM packet encodes an instruction range between a branch target
1215 * and the next taken branch. Generate a sample accordingly.
1217 static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
1218 struct cs_etm_traceid_queue *tidq)
1221 struct cs_etm_auxtrace *etm = etmq->etm;
1222 struct perf_sample sample = {.ip = 0,};
1223 union perf_event *event = tidq->event_buf;
1224 struct dummy_branch_stack {
1227 struct branch_entry entries;
1231 ip = cs_etm__last_executed_instr(tidq->prev_packet);
1233 event->sample.header.type = PERF_RECORD_SAMPLE;
1234 event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
1235 event->sample.header.size = sizeof(struct perf_event_header);
1237 if (!etm->timeless_decoding)
1238 sample.time = etm->latest_kernel_timestamp;
1240 sample.pid = tidq->pid;
1241 sample.tid = tidq->tid;
1242 sample.addr = cs_etm__first_executed_instr(tidq->packet);
1243 sample.id = etmq->etm->branches_id;
1244 sample.stream_id = etmq->etm->branches_id;
1246 sample.cpu = tidq->packet->cpu;
1247 sample.flags = tidq->prev_packet->flags;
1248 sample.cpumode = event->sample.header.misc;
1250 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1254 * perf report cannot handle events without a branch stack
1256 if (etm->synth_opts.last_branch) {
1257 dummy_bs = (struct dummy_branch_stack){
1265 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1268 if (etm->synth_opts.inject) {
1269 ret = cs_etm__inject_event(event, &sample,
1270 etm->branches_sample_type);
1275 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1279 "CS ETM Trace: failed to deliver instruction event, error %d\n",
1285 struct cs_etm_synth {
1286 struct perf_tool dummy_tool;
1287 struct perf_session *session;
1290 static int cs_etm__event_synth(struct perf_tool *tool,
1291 union perf_event *event,
1292 struct perf_sample *sample __maybe_unused,
1293 struct machine *machine __maybe_unused)
1295 struct cs_etm_synth *cs_etm_synth =
1296 container_of(tool, struct cs_etm_synth, dummy_tool);
1298 return perf_session__deliver_synth_event(cs_etm_synth->session,
1302 static int cs_etm__synth_event(struct perf_session *session,
1303 struct perf_event_attr *attr, u64 id)
1305 struct cs_etm_synth cs_etm_synth;
1307 memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
1308 cs_etm_synth.session = session;
1310 return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
1311 &id, cs_etm__event_synth);
1314 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1315 struct perf_session *session)
1317 struct evlist *evlist = session->evlist;
1318 struct evsel *evsel;
1319 struct perf_event_attr attr;
1324 evlist__for_each_entry(evlist, evsel) {
1325 if (evsel->core.attr.type == etm->pmu_type) {
1332 pr_debug("No selected events with CoreSight Trace data\n");
1336 memset(&attr, 0, sizeof(struct perf_event_attr));
1337 attr.size = sizeof(struct perf_event_attr);
1338 attr.type = PERF_TYPE_HARDWARE;
1339 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
1340 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1342 if (etm->timeless_decoding)
1343 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1345 attr.sample_type |= PERF_SAMPLE_TIME;
1347 attr.exclude_user = evsel->core.attr.exclude_user;
1348 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1349 attr.exclude_hv = evsel->core.attr.exclude_hv;
1350 attr.exclude_host = evsel->core.attr.exclude_host;
1351 attr.exclude_guest = evsel->core.attr.exclude_guest;
1352 attr.sample_id_all = evsel->core.attr.sample_id_all;
1353 attr.read_format = evsel->core.attr.read_format;
1355 /* create new id val to be a fixed offset from evsel id */
1356 id = evsel->core.id[0] + 1000000000;
1361 if (etm->synth_opts.branches) {
1362 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
1363 attr.sample_period = 1;
1364 attr.sample_type |= PERF_SAMPLE_ADDR;
1365 err = cs_etm__synth_event(session, &attr, id);
1368 etm->sample_branches = true;
1369 etm->branches_sample_type = attr.sample_type;
1370 etm->branches_id = id;
1372 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
1375 if (etm->synth_opts.last_branch) {
1376 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1378 * We don't use the hardware index, but the sample generation
1379 * code uses the new format branch_stack with this field,
1380 * so the event attributes must indicate that it's present.
1382 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
1385 if (etm->synth_opts.instructions) {
1386 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1387 attr.sample_period = etm->synth_opts.period;
1388 etm->instructions_sample_period = attr.sample_period;
1389 err = cs_etm__synth_event(session, &attr, id);
1392 etm->sample_instructions = true;
1393 etm->instructions_sample_type = attr.sample_type;
1394 etm->instructions_id = id;
1401 static int cs_etm__sample(struct cs_etm_queue *etmq,
1402 struct cs_etm_traceid_queue *tidq)
1404 struct cs_etm_auxtrace *etm = etmq->etm;
1406 u8 trace_chan_id = tidq->trace_chan_id;
1409 /* Get instructions remainder from previous packet */
1410 instrs_prev = tidq->period_instructions;
1412 tidq->period_instructions += tidq->packet->instr_count;
1415 * Record a branch when the last instruction in
1416 * PREV_PACKET is a branch.
1418 if (etm->synth_opts.last_branch &&
1419 tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1420 tidq->prev_packet->last_instr_taken_branch)
1421 cs_etm__update_last_branch_rb(etmq, tidq);
1423 if (etm->sample_instructions &&
1424 tidq->period_instructions >= etm->instructions_sample_period) {
1426 * Emit instruction sample periodically
1427 * TODO: allow period to be defined in cycles and clock time
 * The diagram below demonstrates the instruction sample generation flow:
 *
 *    Instrs     Instrs       Instrs       Instrs
 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
 *    |            |            |            |
 *    V            V            V            V
 *   --------------------------------------------------
 *       ^                                      ^
 *       |                                      |
 *    Period instructions(Pi)    Period instructions(Pi')
 *       \------------------  ------------------/
 *                           V
 *               tidq->packet->instr_count
1449 * Instrs Sample(n...) are the synthesised samples occurring
1450 * every etm->instructions_sample_period instructions - as
1451 * defined on the perf command line. Sample(n) is the last
1452 * sample before the current etm packet; samples n+1 to n+3
1453 * are generated from the current etm packet.
1455 * tidq->packet->instr_count represents the number of
1456 * instructions in the current etm packet.
1458 * Period instructions (Pi) contains the number of
1459 * instructions executed after the sample point(n) from the
1460 * previous etm packet. This will always be less than
1461 * etm->instructions_sample_period.
1463 * When generating new samples, sample(n+1) combines instructions
1464 * from two parts: the tail of the previous packet and the head
1465 * of the current packet. Samples (n+2) and (n+3) then each
1466 * consume a full sample period of instructions. After sample(n+3),
1467 * the remaining instructions carry over to the next packet and
1468 * are kept in tidq->period_instructions for the next round of
1469 * calculation.
1473 * Get the initial offset into the current packet instructions;
1474 * entry conditions ensure that instrs_prev is less than
1475 * etm->instructions_sample_period.
1477 u64 offset = etm->instructions_sample_period - instrs_prev;
1480 /* Prepare last branches for instruction sample */
1481 if (etm->synth_opts.last_branch)
1482 cs_etm__copy_last_branch_rb(etmq, tidq);
1484 while (tidq->period_instructions >=
1485 etm->instructions_sample_period) {
1487 * Calculate the address of the sampled instruction (-1
1488 * as sample is reported as though instruction has just
1489 * been executed, but PC has not advanced to next
1492 addr = cs_etm__instr_addr(etmq, trace_chan_id,
1493 tidq->packet, offset - 1);
1494 ret = cs_etm__synth_instruction_sample(
1496 etm->instructions_sample_period);
1500 offset += etm->instructions_sample_period;
1501 tidq->period_instructions -=
1502 etm->instructions_sample_period;
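/*
 * Worked example of the sampling loop above (numbers are hypothetical):
 * with instructions_sample_period = 10000, instrs_prev = 9000 and a
 * packet carrying 25000 instructions, period_instructions becomes 34000
 * and offset starts at 1000. Samples are synthesised at packet offsets
 * 999, 10999 and 20999, each covering one full period, after which
 * period_instructions is left at 4000 to carry over to the next packet.
 */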
1506 if (etm->sample_branches) {
1507 bool generate_sample = false;
1509 /* Generate a sample when tracing turns back on (after a discontinuity) */
1510 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1511 generate_sample = true;
1513 /* Generate sample for branch taken packet */
1514 if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1515 tidq->prev_packet->last_instr_taken_branch)
1516 generate_sample = true;
1518 if (generate_sample) {
1519 ret = cs_etm__synth_branch_sample(etmq, tidq);
1525 cs_etm__packet_swap(etm, tidq);
1530 static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1533 * When the exception packet is inserted, whether or not the last
1534 * instruction in the previous range packet was a taken branch, we need
1535 * to force 'prev_packet->last_instr_taken_branch' to true. This ensures
1536 * that a branch sample is generated for the instruction range before
1537 * the exception is trapped to the kernel or before the exception returns.
1539 * The exception packet includes dummy address values, so don't
1540 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET useful
1541 * for generating instruction and branch samples.
1543 if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1544 tidq->prev_packet->last_instr_taken_branch = true;
1549 static int cs_etm__flush(struct cs_etm_queue *etmq,
1550 struct cs_etm_traceid_queue *tidq)
1553 struct cs_etm_auxtrace *etm = etmq->etm;
1555 /* Handle start tracing packet */
1556 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1559 if (etmq->etm->synth_opts.last_branch &&
1560 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1563 /* Prepare last branches for instruction sample */
1564 cs_etm__copy_last_branch_rb(etmq, tidq);
1567 * Generate a last branch event for the branches left in the
1568 * circular buffer at the end of the trace.
1570 * Use the address of the end of the last reported execution
1573 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1575 err = cs_etm__synth_instruction_sample(
1577 tidq->period_instructions);
1581 tidq->period_instructions = 0;
1585 if (etm->sample_branches &&
1586 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1587 err = cs_etm__synth_branch_sample(etmq, tidq);
1593 cs_etm__packet_swap(etm, tidq);
1595 /* Reset last branches after flushing the trace */
1596 if (etm->synth_opts.last_branch)
1597 cs_etm__reset_last_branch_rb(tidq);
1602 static int cs_etm__end_block(struct cs_etm_queue *etmq,
1603 struct cs_etm_traceid_queue *tidq)
1608 * There is no new packet coming and 'etmq->packet' contains the stale
1609 * packet which was set the last time packets were swapped; so skip
1610 * generating a branch sample to avoid using the stale packet.
1612 * For this case only flush the branch stack and generate a last branch
1613 * event for the branches left in the circular buffer at the end of
1616 if (etmq->etm->synth_opts.last_branch &&
1617 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1620 /* Prepare last branches for instruction sample */
1621 cs_etm__copy_last_branch_rb(etmq, tidq);
1624 * Use the address of the end of the last reported execution
1627 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1629 err = cs_etm__synth_instruction_sample(
1631 tidq->period_instructions);
1635 tidq->period_instructions = 0;
1641 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1643 * Returns: < 0 if error
1644 * = 0 if no more auxtrace_buffer to read
1645 * > 0 if the current buffer isn't empty yet
1647 static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1651 if (!etmq->buf_len) {
1652 ret = cs_etm__get_trace(etmq);
1656 * We cannot assume consecutive blocks in the data file
1657 * are contiguous, reset the decoder to force re-sync.
1659 ret = cs_etm_decoder__reset(etmq->decoder);
1664 return etmq->buf_len;
1667 static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
1668 struct cs_etm_packet *packet,
1671 /* Initialise to keep compiler happy */
1676 switch (packet->isa) {
1677 case CS_ETM_ISA_T32:
1679 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1682 * +-----------------+--------+
1683 * | 1 1 0 1 1 1 1 1 | imm8 |
1684 * +-----------------+--------+
1686 * According to the specification, SVC is only defined as a 16-bit
1687 * T32 instruction and has no 32-bit encoding; so below read only
1688 * 2 bytes as the instruction size for T32.
1690 addr = end_addr - 2;
1691 cs_etm__mem_access(etmq, trace_chan_id, addr,
1692 sizeof(instr16), (u8 *)&instr16);
1693 if ((instr16 & 0xFF00) == 0xDF00)
1697 case CS_ETM_ISA_A32:
1699 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1701 * b'31 b'28 b'27 b'24
1702 * +---------+---------+-------------------------+
1703 * | !1111 | 1 1 1 1 | imm24 |
1704 * +---------+---------+-------------------------+
1706 addr = end_addr - 4;
1707 cs_etm__mem_access(etmq, trace_chan_id, addr,
1708 sizeof(instr32), (u8 *)&instr32);
1709 if ((instr32 & 0x0F000000) == 0x0F000000 &&
1710 (instr32 & 0xF0000000) != 0xF0000000)
1714 case CS_ETM_ISA_A64:
1716 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1719 * +-----------------------+---------+-----------+
1720 * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
1721 * +-----------------------+---------+-----------+
1723 addr = end_addr - 4;
1724 cs_etm__mem_access(etmq, trace_chan_id, addr,
1725 sizeof(instr32), (u8 *)&instr32);
1726 if ((instr32 & 0xFFE0001F) == 0xd4000001)
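/*
 * Worked example for the A64 check above (encoding per the Arm ARM):
 * "SVC #0" assembles to 0xd4000001 and 0xd4000001 & 0xFFE0001F ==
 * 0xd4000001, so the check matches; an unrelated instruction such as
 * NOP (0xd503201f) masks to 0xd500001f and does not.
 */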
1730 case CS_ETM_ISA_UNKNOWN:
1738 static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1739 struct cs_etm_traceid_queue *tidq, u64 magic)
1741 u8 trace_chan_id = tidq->trace_chan_id;
1742 struct cs_etm_packet *packet = tidq->packet;
1743 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1745 if (magic == __perf_cs_etmv3_magic)
1746 if (packet->exception_number == CS_ETMV3_EXC_SVC)
1750 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
1751 * HVC cases; need to check if it's SVC instruction based on
1754 if (magic == __perf_cs_etmv4_magic) {
1755 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1756 cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1757 prev_packet->end_addr))
1764 static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1767 struct cs_etm_packet *packet = tidq->packet;
1769 if (magic == __perf_cs_etmv3_magic)
1770 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1771 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1772 packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1773 packet->exception_number == CS_ETMV3_EXC_IRQ ||
1774 packet->exception_number == CS_ETMV3_EXC_FIQ)
1777 if (magic == __perf_cs_etmv4_magic)
1778 if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1779 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1780 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1781 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1782 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1783 packet->exception_number == CS_ETMV4_EXC_IRQ ||
1784 packet->exception_number == CS_ETMV4_EXC_FIQ)
1790 static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
1791 struct cs_etm_traceid_queue *tidq,
1794 u8 trace_chan_id = tidq->trace_chan_id;
1795 struct cs_etm_packet *packet = tidq->packet;
1796 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1798 if (magic == __perf_cs_etmv3_magic)
1799 if (packet->exception_number == CS_ETMV3_EXC_SMC ||
1800 packet->exception_number == CS_ETMV3_EXC_HYP ||
1801 packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
1802 packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
1803 packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
1804 packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
1805 packet->exception_number == CS_ETMV3_EXC_GENERIC)
1808 if (magic == __perf_cs_etmv4_magic) {
1809 if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
1810 packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
1811 packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
1812 packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
1816 * For CS_ETMV4_EXC_CALL, instructions other than SVC
1817 * (SMC, HVC) are taken as sync exceptions.
1819 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1820 !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1821 prev_packet->end_addr))
1825 * ETMv4 has 5 bits for exception number; if the numbers
1826 * are in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
1827 * they are implementation defined exceptions.
1829 * In this case, simply treat it as a sync exception.
1831 if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
1832 packet->exception_number <= CS_ETMV4_EXC_END)
1839 static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
1840 struct cs_etm_traceid_queue *tidq)
1842 struct cs_etm_packet *packet = tidq->packet;
1843 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1844 u8 trace_chan_id = tidq->trace_chan_id;
1848 switch (packet->sample_type) {
1851 * Immediate branch instruction with neither link nor
1852 * return flag; it's a normal branch instruction within
1855 if (packet->last_instr_type == OCSD_INSTR_BR &&
1856 packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
1857 packet->flags = PERF_IP_FLAG_BRANCH;
1859 if (packet->last_instr_cond)
1860 packet->flags |= PERF_IP_FLAG_CONDITIONAL;
1864 * Immediate branch instruction with link (e.g. BL); this is a
1865 * branch instruction for a function call.
1867 if (packet->last_instr_type == OCSD_INSTR_BR &&
1868 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1869 packet->flags = PERF_IP_FLAG_BRANCH |
1873 * Indirect branch instruction with link (e.g. BLR); this is a
1874 * branch instruction for a function call.
1876 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1877 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1878 packet->flags = PERF_IP_FLAG_BRANCH |
1882 * Indirect branch instruction with subtype
1883 * OCSD_S_INSTR_V7_IMPLIED_RET; this is an explicit hint of a
1884 * function return for A32/T32.
1886 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1887 packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
1888 packet->flags = PERF_IP_FLAG_BRANCH |
1889 PERF_IP_FLAG_RETURN;
1892 * Indirect branch instruction without link (e.g. BR); usually
1893 * this is used for a function return, especially for functions
1894 * in dynamically linked libraries.
1896 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1897 packet->last_instr_subtype == OCSD_S_INSTR_NONE)
1898 packet->flags = PERF_IP_FLAG_BRANCH |
1899 PERF_IP_FLAG_RETURN;
1901 /* Return instruction for function return. */
1902 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1903 packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
1904 packet->flags = PERF_IP_FLAG_BRANCH |
1905 PERF_IP_FLAG_RETURN;
1908 * The decoder might insert a discontinuity in the middle of
1909 * instruction packets; fix up prev_packet with the flag
1910 * PERF_IP_FLAG_TRACE_BEGIN to indicate that trace restarted.
1912 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1913 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1914 PERF_IP_FLAG_TRACE_BEGIN;
1917 * If the previous packet is an exception return packet
1918 * and the return address just follows an SVC instruction,
1919 * the previous packet's sample flags need to be calibrated
1920 * to PERF_IP_FLAG_SYSCALLRET.
1922 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
1923 PERF_IP_FLAG_RETURN |
1924 PERF_IP_FLAG_INTERRUPT) &&
1925 cs_etm__is_svc_instr(etmq, trace_chan_id,
1926 packet, packet->start_addr))
1927 prev_packet->flags = PERF_IP_FLAG_BRANCH |
1928 PERF_IP_FLAG_RETURN |
1929 PERF_IP_FLAG_SYSCALLRET;
1931 case CS_ETM_DISCONTINUITY:
1933 * The trace is discontinuous; if the previous packet is an
1934 * instruction range packet, set the flag PERF_IP_FLAG_TRACE_END
1935 * on the previous packet.
1937 if (prev_packet->sample_type == CS_ETM_RANGE)
1938 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1939 PERF_IP_FLAG_TRACE_END;
1941 case CS_ETM_EXCEPTION:
1942 ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
1946 /* The exception is for system call. */
1947 if (cs_etm__is_syscall(etmq, tidq, magic))
1948 packet->flags = PERF_IP_FLAG_BRANCH |
1950 PERF_IP_FLAG_SYSCALLRET;
1952 * The exceptions are triggered by external signals from bus,
1953 * interrupt controller, debug module, PE reset or halt.
1955 else if (cs_etm__is_async_exception(tidq, magic))
1956 packet->flags = PERF_IP_FLAG_BRANCH |
1958 PERF_IP_FLAG_ASYNC |
1959 PERF_IP_FLAG_INTERRUPT;
1961 * Otherwise, the exception is caused by a trap, an instruction
1962 * or data fault, or an alignment error.
1964 else if (cs_etm__is_sync_exception(etmq, tidq, magic))
1965 packet->flags = PERF_IP_FLAG_BRANCH |
1967 PERF_IP_FLAG_INTERRUPT;
1970 * When the exception packet is inserted, it is not used
1971 * standalone for generating samples; it is affiliated with the
1972 * previous instruction range packet. So set the previous range
1973 * packet's flags to tell perf it is an exception taken
1974 * branch.
1976 if (prev_packet->sample_type == CS_ETM_RANGE)
1977 prev_packet->flags = packet->flags;
1979 case CS_ETM_EXCEPTION_RET:
1981 * When the exception return packet is inserted, it is not
1982 * used standalone for generating samples; it is affiliated
1983 * with the previous instruction range packet. So set the
1984 * previous range packet's flags to tell perf it is an
1985 * exception return branch.
1987 * The exception return can be for either a system call or
1988 * another exception type; unfortunately the packet doesn't
1989 * contain exception type information, so we cannot decide
1990 * the exception type purely from the exception return packet.
1991 * If we record the exception number from the exception packet
1992 * and reuse it for the exception return packet, this is not
1993 * reliable because the trace can be discontinuous or the
1994 * interrupt can be nested, so the recorded exception number
1995 * cannot be used for the exception return packet in these two cases.
1997 * For the exception return packet, we only need to distinguish
1998 * whether the packet is for a system call or for another type.
1999 * Thus the decision can be deferred until we receive the next
2000 * packet, which contains the return address; based on the return
2001 * address we can read back the previous instruction, check if it's
2002 * a system call instruction and then calibrate the sample flag
2005 if (prev_packet->sample_type == CS_ETM_RANGE)
2006 prev_packet->flags = PERF_IP_FLAG_BRANCH |
2007 PERF_IP_FLAG_RETURN |
2008 PERF_IP_FLAG_INTERRUPT;
2018 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2021 size_t processed = 0;
2024 * Packets are decoded and added to the decoder's packet queue
2025 * until the decoder packet processing callback has requested that
2026 * processing stops or there is nothing left in the buffer. Normal
2027 * operations that stop processing are a timestamp packet or a full
2028 * decoder buffer queue.
2030 ret = cs_etm_decoder__process_data_block(etmq->decoder,
2032 &etmq->buf[etmq->buf_used],
2038 etmq->offset += processed;
2039 etmq->buf_used += processed;
2040 etmq->buf_len -= processed;
2046 static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2047 struct cs_etm_traceid_queue *tidq)
2050 struct cs_etm_packet_queue *packet_queue;
2052 packet_queue = &tidq->packet_queue;
2054 /* Process each packet in this chunk */
2056 ret = cs_etm_decoder__get_packet(packet_queue,
2060 * Stop processing this chunk on
2061 * end of data or error
2066 * Since packet addresses are swapped in the packet
2067 * handling within the switch() statement below,
2068 * the sample flags must be set prior to the
2069 * switch() statement so that address information
2070 * is used before the packets are swapped.
2072 ret = cs_etm__set_sample_flags(etmq, tidq);
2076 switch (tidq->packet->sample_type) {
2079 * If the packet contains an instruction
2080 * range, generate instruction sequence
2083 cs_etm__sample(etmq, tidq);
2085 case CS_ETM_EXCEPTION:
2086 case CS_ETM_EXCEPTION_RET:
2088 * If an exception packet is coming,
2089 * make sure the previous instruction
2090 * range packet is handled properly.
2092 cs_etm__exception(tidq);
2094 case CS_ETM_DISCONTINUITY:
2096 * Discontinuity in trace, flush
2097 * previous branch stack
2099 cs_etm__flush(etmq, tidq);
2103 * Should not receive empty packet,
2106 pr_err("CS ETM Trace: empty packet\n");
2116 static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2119 struct int_node *inode;
2120 struct cs_etm_traceid_queue *tidq;
2121 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2123 intlist__for_each_entry(inode, traceid_queues_list) {
2124 idx = (int)(intptr_t)inode->priv;
2125 tidq = etmq->traceid_queues[idx];
2127 /* Ignore return value */
2128 cs_etm__process_traceid_queue(etmq, tidq);
2131 * Generate an instruction sample with the remaining
2132 * branch stack entries.
2134 cs_etm__flush(etmq, tidq);
2138 static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
2141 struct cs_etm_traceid_queue *tidq;
2143 tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2147 /* Go through each buffer in the queue and decode them one by one */
2149 err = cs_etm__get_data_block(etmq);
2153 /* Run trace decoder until buffer consumed or end of trace */
2155 err = cs_etm__decode_data_block(etmq);
2160 * Process each packet in this chunk, nothing to do if
2161 * an error occurs other than hoping the next one will
2164 err = cs_etm__process_traceid_queue(etmq, tidq);
2166 } while (etmq->buf_len);
2169 /* Flush any remaining branch stack entries */
2170 err = cs_etm__end_block(etmq, tidq);
2176 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2180 struct auxtrace_queues *queues = &etm->queues;
2182 for (i = 0; i < queues->nr_queues; i++) {
2183 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2184 struct cs_etm_queue *etmq = queue->priv;
2185 struct cs_etm_traceid_queue *tidq;
2190 tidq = cs_etm__etmq_get_traceid_queue(etmq,
2191 CS_ETM_PER_THREAD_TRACEID);
2196 if ((tid == -1) || (tidq->tid == tid)) {
2197 cs_etm__set_pid_tid_cpu(etm, tidq);
2198 cs_etm__run_decoder(etmq);
2205 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
2208 unsigned int cs_queue_nr, queue_nr, i;
2211 struct auxtrace_queue *queue;
2212 struct cs_etm_queue *etmq;
2213 struct cs_etm_traceid_queue *tidq;
2216 * Pre-populate the heap with one entry from each queue so that we can
2217 * start processing in time order across all queues.
2219 for (i = 0; i < etm->queues.nr_queues; i++) {
2220 etmq = etm->queues.queue_array[i].priv;
2224 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2230 if (!etm->heap.heap_cnt)
2233 /* Take the entry at the top of the min heap */
2234 cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2235 queue_nr = TO_QUEUE_NR(cs_queue_nr);
2236 trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2237 queue = &etm->queues.queue_array[queue_nr];
2241 * Remove the top entry from the heap since we are about
2244 auxtrace_heap__pop(&etm->heap);
2246 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2249 * No traceID queue has been allocated for this traceID,
2250 * which means something somewhere went very wrong. No
2251 * other choice than simply exit.
2258 * Packets associated with this timestamp are already in
2259 * the etmq's traceID queue, so process them.
2261 ret = cs_etm__process_traceid_queue(etmq, tidq);
2266 * Packets for this timestamp have been processed, time to
2267 * move on to the next timestamp, fetching a new auxtrace_buffer
2271 ret = cs_etm__get_data_block(etmq);
2276 * No more auxtrace_buffers to process in this etmq, simply
2277 * move on to another entry in the auxtrace_heap.
2282 ret = cs_etm__decode_data_block(etmq);
2286 cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
2288 if (!cs_timestamp) {
2290 * Function cs_etm__decode_data_block() returns when
2291 * there are no more traces to decode in the current
2292 * auxtrace_buffer OR when a timestamp has been
2293 * encountered on any of the traceID queues. Since we
2294 * did not get a timestamp, there are no more traces to
2295 * process in this auxtrace_buffer. As such, empty and
2296 * flush all traceID queues.
2298 cs_etm__clear_all_traceid_queues(etmq);
2300 /* Fetch another auxtrace_buffer for this etmq */
2305 * Add to the min heap the timestamp for packets that have
2306 * just been decoded. They will be processed and synthesized
2307 * during the next call to cs_etm__process_traceid_queue() for
2308 * this queue/traceID.
2310 cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
2311 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
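/*
 * The loop then goes back to the top of the heap: whichever queue now holds
 * the oldest timestamp is serviced next, until the heap is drained.
 */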
2318 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2319 union perf_event *event)
2323 if (etm->timeless_decoding)
2327 * Add the tid/pid to the log so that we can get a match when
2328 * we get a contextID from the decoder.
2330 th = machine__findnew_thread(etm->machine,
2331 event->itrace_start.pid,
2332 event->itrace_start.tid);
2341 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2342 union perf_event *event)
2345 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2348 * Context switches in per-thread mode are irrelevant since perf
2349 * will start/stop tracing as the process is scheduled.
2351 if (etm->timeless_decoding)
2355 * SWITCH_IN events carry the next process to be switched out while
2356 * SWITCH_OUT events carry the process to be switched in. As such
2357 * we don't care about IN events.
2363 * Add the tid/pid to the log so that we can get a match when
2364 * we get a contextID from the decoder.
2366 th = machine__findnew_thread(etm->machine,
2367 event->context_switch.next_prev_pid,
2368 event->context_switch.next_prev_tid);
2377 static int cs_etm__process_event(struct perf_session *session,
2378 union perf_event *event,
2379 struct perf_sample *sample,
2380 struct perf_tool *tool)
2382 u64 sample_kernel_timestamp;
2383 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2384 struct cs_etm_auxtrace,
2390 if (!tool->ordered_events) {
2391 pr_err("CoreSight ETM Trace requires ordered events\n");
2395 if (sample->time && (sample->time != (u64) -1))
2396 sample_kernel_timestamp = sample->time;
2398 sample_kernel_timestamp = 0;
2401 * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
2402 * need the tid of the PERF_RECORD_EXIT event to assign it to the synthesised samples because
2403 * ETM_OPT_CTXTID is not enabled.
2405 if (etm->timeless_decoding &&
2406 event->header.type == PERF_RECORD_EXIT)
2407 return cs_etm__process_timeless_queues(etm,
2410 if (event->header.type == PERF_RECORD_ITRACE_START)
2411 return cs_etm__process_itrace_start(etm, event);
2412 else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2413 return cs_etm__process_switch_cpu_wide(etm, event);
2415 if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
2417 * Record the latest kernel timestamp available in the header
2418 * for samples so that synthesised samples occur from this point
2421 etm->latest_kernel_timestamp = sample_kernel_timestamp;
2427 static void dump_queued_data(struct cs_etm_auxtrace *etm,
2428 struct perf_record_auxtrace *event)
2430 struct auxtrace_buffer *buf;
2433 * Find all buffers with same reference in the queues and dump them.
2434 * This is because the queues can contain multiple entries of the same
2435 * buffer that were split on aux records.
2437 for (i = 0; i < etm->queues.nr_queues; ++i)
2438 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2439 if (buf->reference == event->reference)
2440 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2443 static int cs_etm__process_auxtrace_event(struct perf_session *session,
2444 union perf_event *event,
2445 struct perf_tool *tool __maybe_unused)
2447 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2448 struct cs_etm_auxtrace,
2450 if (!etm->data_queued) {
2451 struct auxtrace_buffer *buffer;
2453 int fd = perf_data__fd(session->data);
2454 bool is_pipe = perf_data__is_pipe(session->data);
2456 int idx = event->auxtrace.idx;
2461 data_offset = lseek(fd, 0, SEEK_CUR);
2462 if (data_offset == -1)
2466 err = auxtrace_queues__add_event(&etm->queues, session,
2467 event, data_offset, &buffer);
2472 * Knowing if the trace is formatted or not requires a lookup of
2473 * the aux record so only works in non-piped mode where data is
2474 * queued in cs_etm__queue_aux_records(). Always assume
2475 * formatted in piped mode (true).
2477 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2483 if (auxtrace_buffer__get_data(buffer, fd)) {
2484 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2485 auxtrace_buffer__put_data(buffer);
2487 } else if (dump_trace)
2488 dump_queued_data(etm, &event->auxtrace);
2493 static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2495 struct evsel *evsel;
2496 struct evlist *evlist = etm->session->evlist;
2497 bool timeless_decoding = true;
2499 /* Override timeless mode with user input from --itrace=Z */
2500 if (etm->synth_opts.timeless_decoding)
2504 * Cycle through the list of events and disable timeless decoding
2505 * if we find one with the time bit set.
2507 evlist__for_each_entry(evlist, evsel) {
2508 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2509 timeless_decoding = false;
2512 return timeless_decoding;
2515 static const char * const cs_etm_global_header_fmts[] = {
2516 [CS_HEADER_VERSION] = " Header version %llx\n",
2517 [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
2518 [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
2521 static const char * const cs_etm_priv_fmts[] = {
2522 [CS_ETM_MAGIC] = " Magic number %llx\n",
2523 [CS_ETM_CPU] = " CPU %lld\n",
2524 [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
2525 [CS_ETM_ETMCR] = " ETMCR %llx\n",
2526 [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
2527 [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
2528 [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
2531 static const char * const cs_etmv4_priv_fmts[] = {
2532 [CS_ETM_MAGIC] = " Magic number %llx\n",
2533 [CS_ETM_CPU] = " CPU %lld\n",
2534 [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
2535 [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
2536 [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
2537 [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
2538 [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
2539 [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
2540 [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
2541 [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
2542 [CS_ETE_TRCDEVARCH] = " TRCDEVARCH %llx\n"
2545 static const char * const param_unk_fmt =
2546 " Unknown parameter [%d] %llx\n";
2547 static const char * const magic_unk_fmt =
2548 " Magic number Unknown %llx\n";
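/*
 * For illustration only (register values below are invented and the column
 * spacing is abbreviated), a dumped ETMv4 per-CPU block produced by the
 * print routines that follow looks roughly like:
 *
 *	Magic number	0x4040404040404040
 *	CPU		0
 *	NR_TRC_PARAMS	0x7
 *	TRCCONFIGR	0x404c1
 *	TRCTRACEIDR	0x10
 *	...
 */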
2550 static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
2552 int i = *offset, j, nr_params = 0, fmt_offset;
2555 /* check magic value */
2556 magic = val[i + CS_ETM_MAGIC];
2557 if ((magic != __perf_cs_etmv3_magic) &&
2558 (magic != __perf_cs_etmv4_magic)) {
2559 /* failure - note bad magic value */
2560 fprintf(stdout, magic_unk_fmt, magic);
2564 /* print common header block */
2565 fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
2566 fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
2568 if (magic == __perf_cs_etmv3_magic) {
2569 nr_params = CS_ETM_NR_TRC_PARAMS_V0;
2570 fmt_offset = CS_ETM_ETMCR;
2571 /* after common block, offset format index past NR_PARAMS */
2572 for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
2573 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
2574 } else if (magic == __perf_cs_etmv4_magic) {
2575 nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
2576 fmt_offset = CS_ETMV4_TRCCONFIGR;
2577 /* after common block, offset format index past NR_PARAMS */
2578 for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
2579 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
2585 static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
2587 int i = *offset, j, total_params = 0;
2590 magic = val[i + CS_ETM_MAGIC];
2591 /* total params to print is NR_PARAMS + common block size for v1 */
2592 total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
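/*
 * E.g. an ETMv4 CPU recorded by a perf that knows the seven v4 trace
 * parameters yields NR_TRC_PARAMS == 7; adding the three common fields
 * (magic, CPU, NR_TRC_PARAMS) gives total_params == 10 entries to print.
 */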
2594 if (magic == __perf_cs_etmv3_magic) {
2595 for (j = 0; j < total_params; j++, i++) {
2596 /* if newer record - could be excess params */
2597 if (j >= CS_ETM_PRIV_MAX)
2598 fprintf(stdout, param_unk_fmt, j, val[i]);
2600 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
2602 } else if (magic == __perf_cs_etmv4_magic || magic == __perf_cs_ete_magic) {
2604 * ETE and ETMv4 can be printed in the same block because the number of parameters
2605 * is saved and they share the list of parameter names. ETE is also only supported in V1 files.
2608 for (j = 0; j < total_params; j++, i++) {
2609 /* if newer record - could be excess params */
2610 if (j >= CS_ETE_PRIV_MAX)
2611 fprintf(stdout, param_unk_fmt, j, val[i]);
2613 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
2616 /* failure - note bad magic value and error out */
2617 fprintf(stdout, magic_unk_fmt, magic);
2624 static void cs_etm__print_auxtrace_info(__u64 *val, int num)
2626 int i, cpu = 0, version, err;
2628 /* bail out early on bad header version */
2630 if (version > CS_HEADER_CURRENT_VERSION) {
2631 /* failure.. return */
2632 fprintf(stdout, " Unknown Header Version = %x, ", version);
2633 fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
2637 for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
2638 fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
2640 for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
2642 err = cs_etm__print_cpu_metadata_v0(val, &i);
2643 else if (version == 1)
2644 err = cs_etm__print_cpu_metadata_v1(val, &i);
2651 * Read a single cpu parameter block from the auxtrace_info priv block.
2653 * For version 1 there is a per cpu nr_params entry. If we are handling
2654 * a version 1 file, then there may be fewer, the same, or more params
2655 * indicated by this value than the compile time number we understand.
2657 * For a version 0 info block, there are a fixed number, and we need to
2658 * fill out the nr_param value in the metadata we create.
2660 static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
2661 int out_blk_size, int nr_params_v0)
2663 u64 *metadata = NULL;
2665 int nr_in_params, nr_out_params, nr_cmn_params;
2668 metadata = zalloc(sizeof(*metadata) * out_blk_size);
2672 /* read block current index & version */
2673 i = *buff_in_offset;
2674 hdr_version = buff_in[CS_HEADER_VERSION];
2677 /* read version 0 info block into a version 1 metadata block */
2678 nr_in_params = nr_params_v0;
2679 metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
2680 metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
2681 metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
2682 /* remaining block params at offset +1 from source */
2683 for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
2684 metadata[k + 1] = buff_in[i + k];
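/*
 * The "+1" accounts for the CS_ETM_NR_TRC_PARAMS slot that version 1
 * metadata carries between the common fields and the per-CPU registers;
 * version 0 info blocks don't have it, so each copied register lands one
 * entry later in the output block than in the source.
 */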
2685 /* version 0 has 2 common params */
2688 /* read version 1 info block - input and output nr_params may differ */
2689 /* version 1 has 3 common params */
2691 nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
2693 /* if input has more params than output - skip excess */
2694 nr_out_params = nr_in_params + nr_cmn_params;
2695 if (nr_out_params > out_blk_size)
2696 nr_out_params = out_blk_size;
2698 for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
2699 metadata[k] = buff_in[i + k];
2701 /* record the actual nr params we copied */
2702 metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
2705 /* adjust in offset by number of in params used */
2706 i += nr_in_params + nr_cmn_params;
2707 *buff_in_offset = i;
2712 * Puts a fragment of an auxtrace buffer into the auxtrace queues based
2713 * on the bounds of aux_event, if it matches with the buffer that's at
2716 * Normally, whole auxtrace buffers would be added to the queue. But we
2717 * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
2718 * is reset across each buffer, so splitting the buffers up in advance has the same effect.
2721 static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
2722 struct perf_record_aux *aux_event, struct perf_sample *sample)
2725 char buf[PERF_SAMPLE_MAX_SIZE];
2726 union perf_event *auxtrace_event_union;
2727 struct perf_record_auxtrace *auxtrace_event;
2728 union perf_event auxtrace_fragment;
2729 __u64 aux_offset, aux_size;
2733 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2734 struct cs_etm_auxtrace,
2738 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
2739 * from looping through the auxtrace index.
2741 err = perf_session__peek_event(session, file_offset, buf,
2742 PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
2745 auxtrace_event = &auxtrace_event_union->auxtrace;
2746 if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
2749 if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
2750 auxtrace_event->header.size != sz) {
2755 * In per-thread mode, CPU is set to -1, but TID will be set instead. See
2756 * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
2758 if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
2759 auxtrace_event->cpu != sample->cpu)
2762 if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
2764 * Clamp size in snapshot mode. The buffer size is clamped in
2765 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect the actual size of the captured data.
2768 aux_size = min(aux_event->aux_size, auxtrace_event->size);
2771 * In this mode, the head also points to the end of the buffer so aux_offset
2772 * needs to have the size subtracted so that it points to the beginning, as in normal mode.
2774 aux_offset = aux_event->aux_offset - aux_size;
2776 aux_size = aux_event->aux_size;
2777 aux_offset = aux_event->aux_offset;
2780 if (aux_offset >= auxtrace_event->offset &&
2781 aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
2783 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
2784 * based on the sizes of the aux event, and queue that fragment.
2786 auxtrace_fragment.auxtrace = *auxtrace_event;
2787 auxtrace_fragment.auxtrace.size = aux_size;
2788 auxtrace_fragment.auxtrace.offset = aux_offset;
2789 file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
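/*
 * file_offset now points at the fragment's payload inside the original
 * PERF_RECORD_AUXTRACE data: skip the event header, then advance by the
 * fragment's offset within that buffer.
 */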
2791 pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
2792 " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
2793 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
2798 idx = auxtrace_event->idx;
2799 formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
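/*
 * Raw (unformatted) data comes from sinks that serve a single trace source,
 * e.g. TRBE, and bypasses the CoreSight frame formatter. The decoder must
 * be configured differently for the two cases, so the flag is propagated to
 * the queue setup.
 */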
2800 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2804 /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
2808 static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2809 u64 offset __maybe_unused, void *data __maybe_unused)
2811 struct perf_sample sample;
2813 struct auxtrace_index_entry *ent;
2814 struct auxtrace_index *auxtrace_index;
2815 struct evsel *evsel;
2818 /* Don't care about any other events, we're only queuing buffers for AUX events */
2819 if (event->header.type != PERF_RECORD_AUX)
2822 if (event->header.size < sizeof(struct perf_record_aux))
2825 /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
2826 if (!event->aux.aux_size)
2830 * Parse the sample, we need the sample_id_all data that comes after the event so that the
2831 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
2833 evsel = evlist__event2evsel(session->evlist, event);
2836 ret = evsel__parse_sample(evsel, event, &sample);
2841 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
2843 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
2844 for (i = 0; i < auxtrace_index->nr; i++) {
2845 ent = &auxtrace_index->entries[i];
2846 ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
2847 ent->sz, &event->aux, &sample);
2849 * Stop search on error or successful values. Continue search on 1 ('not found').
2858 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
2859 * don't exit with an error because it will still be possible to decode other aux records.
2861 pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
2862 " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
2866 static int cs_etm__queue_aux_records(struct perf_session *session)
2868 struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
2869 struct auxtrace_index, list);
2870 if (index && index->nr > 0)
2871 return perf_session__peek_events(session, session->header.data_offset,
2872 session->header.data_size,
2873 cs_etm__queue_aux_records_cb, NULL);
2876 * We would get here if there are no entries in the index (either no auxtrace
2877 * buffers or no index at all). Fail silently as there is the possibility of
2878 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still false.
2881 * In that scenario, buffers will not be split by AUX records.
2886 int cs_etm__process_auxtrace_info(union perf_event *event,
2887 struct perf_session *session)
2889 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
2890 struct cs_etm_auxtrace *etm = NULL;
2891 struct int_node *inode;
2892 unsigned int pmu_type;
2893 int event_header_size = sizeof(struct perf_event_header);
2894 int info_header_size;
2895 int total_size = auxtrace_info->header.size;
2897 int num_cpu, trcidr_idx;
2900 u64 *ptr, *hdr = NULL;
2901 u64 **metadata = NULL;
2905 * sizeof(auxtrace_info_event::type) +
2906 * sizeof(auxtrace_info_event::reserved) == 8
2908 info_header_size = 8;
2910 if (total_size < (event_header_size + info_header_size))
2913 priv_size = total_size - event_header_size - info_header_size;
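/*
 * priv_size is the number of bytes of CoreSight-specific payload following
 * the generic auxtrace_info header; it is checked further down against the
 * number of double words actually consumed while parsing.
 */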
2915 /* First the global part */
2916 ptr = (u64 *) auxtrace_info->priv;
2918 /* Look for version of the header */
2919 hdr_version = ptr[0];
2920 if (hdr_version > CS_HEADER_CURRENT_VERSION) {
2921 /* print routine will print an error on bad version */
2923 cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
2927 hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
2931 /* Extract header information - see cs-etm.h for format */
2932 for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
2934 num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
2935 pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
2939 * Create an RB tree for traceID-metadata tuple. Since the conversion
2940 * has to be made for each packet that gets decoded, optimizing access
2941 * in anything other than a sequential array is worth doing.
2943 traceid_list = intlist__new(NULL);
2944 if (!traceid_list) {
2949 metadata = zalloc(sizeof(*metadata) * num_cpu);
2952 goto err_free_traceid_list;
2956 * The metadata is stored in the auxtrace_info section and encodes
2957 * the configuration of the ARM embedded trace macrocell which is
2958 * required by the trace decoder to properly decode the trace due
2959 * to its highly compressed nature.
2961 for (j = 0; j < num_cpu; j++) {
2962 if (ptr[i] == __perf_cs_etmv3_magic) {
2964 cs_etm__create_meta_blk(ptr, &i,
2966 CS_ETM_NR_TRC_PARAMS_V0);
2968 /* The traceID is our handle */
2969 trcidr_idx = CS_ETM_ETMTRACEIDR;
2971 } else if (ptr[i] == __perf_cs_etmv4_magic) {
2973 cs_etm__create_meta_blk(ptr, &i,
2975 CS_ETMV4_NR_TRC_PARAMS_V0);
2977 /* The traceID is our handle */
2978 trcidr_idx = CS_ETMV4_TRCTRACEIDR;
2979 } else if (ptr[i] == __perf_cs_ete_magic) {
2980 metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
2982 /* ETE shares first part of metadata with ETMv4 */
2983 trcidr_idx = CS_ETMV4_TRCTRACEIDR;
2985 ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
2988 goto err_free_metadata;
2993 goto err_free_metadata;
2996 /* Get an RB node for this CPU */
2997 inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
2999 /* Something went wrong, no need to continue */
3002 goto err_free_metadata;
3006 * The node for that CPU should not be taken.
3007 * Back out if that's the case.
3011 goto err_free_metadata;
3013 /* All good, associate the traceID with the metadata pointer */
3014 inode->priv = metadata[j];
3018 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
3019 * CS_ETMV4_PRIV_MAX mark how many double words are in the
3020 * global metadata, and each cpu's metadata respectively.
3021 * The following tests if the correct number of double words was
3022 * present in the auxtrace info section.
3024 if (i * 8 != priv_size) {
3026 goto err_free_metadata;
3029 etm = zalloc(sizeof(*etm));
3033 goto err_free_metadata;
3036 err = auxtrace_queues__init(&etm->queues);
3040 if (session->itrace_synth_opts->set) {
3041 etm->synth_opts = *session->itrace_synth_opts;
3043 itrace_synth_opts__set_default(&etm->synth_opts,
3044 session->itrace_synth_opts->default_no_sample);
3045 etm->synth_opts.callchain = false;
3048 etm->session = session;
3049 etm->machine = &session->machines.host;
3051 etm->num_cpu = num_cpu;
3052 etm->pmu_type = pmu_type;
3053 etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
3054 etm->metadata = metadata;
3055 etm->auxtrace_type = auxtrace_info->type;
3056 etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
3058 etm->auxtrace.process_event = cs_etm__process_event;
3059 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3060 etm->auxtrace.flush_events = cs_etm__flush_events;
3061 etm->auxtrace.free_events = cs_etm__free_events;
3062 etm->auxtrace.free = cs_etm__free;
3063 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
3064 session->auxtrace = &etm->auxtrace;
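/*
 * The "unknown" thread below is a catch-all used when decoded trace cannot
 * be attributed to a real thread, e.g. before any context ID has been seen;
 * the improbable pid/tid of 999999999 keeps it clear of real tasks.
 */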
3066 etm->unknown_thread = thread__new(999999999, 999999999);
3067 if (!etm->unknown_thread) {
3069 goto err_free_queues;
3073 * Initialize list node so that at thread__zput() we can avoid
3074 * segmentation fault at list_del_init().
3076 INIT_LIST_HEAD(&etm->unknown_thread->node);
3078 err = thread__set_comm(etm->unknown_thread, "unknown", 0);
3080 goto err_delete_thread;
3082 if (thread__init_maps(etm->unknown_thread, etm->machine)) {
3084 goto err_delete_thread;
3088 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
3091 err = cs_etm__synth_events(etm, session);
3093 goto err_delete_thread;
3095 err = cs_etm__queue_aux_records(session);
3097 goto err_delete_thread;
3099 etm->data_queued = etm->queues.populated;
3101 * Print warning in pipe mode, see cs_etm__process_auxtrace_event() and
3102 * cs_etm__queue_aux_fragment() for details relating to limitations.
3104 if (!etm->data_queued)
3105 pr_warning("CS ETM warning: Coresight decode and TRBE support requires random file access.\n"
3106 "Continuing with best effort decoding in piped mode.\n\n");
3111 thread__zput(etm->unknown_thread);
3113 auxtrace_queues__free(&etm->queues);
3114 session->auxtrace = NULL;
3118 /* No need to check @metadata[j], free(NULL) is supported */
3119 for (j = 0; j < num_cpu; j++)
3120 zfree(&metadata[j]);
3122 err_free_traceid_list:
3123 intlist__delete(traceid_list);
3127 * At this point, as a minimum we have a valid header. Dump the rest of
3128 * the info section - the print routines will error out on structural issues.
3132 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);