1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015-2018 Linaro Limited.
5 * Author: Tor Jeremiassen <tor@ti.com>
6 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
9 #include <linux/bitops.h>
10 #include <linux/coresight-pmu.h>
11 #include <linux/err.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
17 #include <opencsd/ocsd_if_types.h>
23 #include "cs-etm-decoder/cs-etm-decoder.h"
32 #include "map_symbol.h"
37 #include "thread-stack.h"
38 #include <tools/libc_compat.h>
39 #include "util/synthetic-events.h"
41 struct cs_etm_auxtrace {
42 struct auxtrace auxtrace;
43 struct auxtrace_queues queues;
44 struct auxtrace_heap heap;
45 struct itrace_synth_opts synth_opts;
46 struct perf_session *session;
47 struct machine *machine;
48 struct thread *unknown_thread;
54 u8 sample_instructions;
57 u64 latest_kernel_timestamp;
59 u64 branches_sample_type;
61 u64 instructions_sample_type;
62 u64 instructions_sample_period;
65 unsigned int pmu_type;
68 struct cs_etm_traceid_queue {
71 u64 period_instructions;
72 size_t last_branch_pos;
73 union perf_event *event_buf;
74 struct thread *thread;
75 struct branch_stack *last_branch;
76 struct branch_stack *last_branch_rb;
77 struct cs_etm_packet *prev_packet;
78 struct cs_etm_packet *packet;
79 struct cs_etm_packet_queue packet_queue;
83 struct cs_etm_auxtrace *etm;
84 struct cs_etm_decoder *decoder;
85 struct auxtrace_buffer *buffer;
86 unsigned int queue_nr;
87 u8 pending_timestamp_chan_id;
89 const unsigned char *buf;
90 size_t buf_len, buf_used;
91 /* Conversion between traceID and index in traceid_queues array */
92 struct intlist *traceid_queues_list;
93 struct cs_etm_traceid_queue **traceid_queues;
96 /* RB tree for quick conversion between traceID and metadata pointers */
97 static struct intlist *traceid_list;
99 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
100 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
102 static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
103 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
105 /* PTM's ETMIDR[11:8] is set to b0011 */
106 #define ETMIDR_PTM_VERSION 0x00000300
109 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
110 * work with. One option is to modify the auxtrace_heap_XYZ() API or simply
111 * encode the etm queue number in the upper 16 bits and the channel ID in the lower 16 bits.
114 #define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
115 (queue_nr << 16 | trace_chan_id)
116 #define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
117 #define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
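/*
 * A quick worked example of this encoding, with hypothetical values: for
 * queue_nr = 2 and trace_chan_id = 0x10, TO_CS_QUEUE_NR() yields
 * (2 << 16) | 0x10 = 0x20010; decoding it again, TO_QUEUE_NR(0x20010) = 2
 * and TO_TRACE_CHAN_ID(0x20010) = 0x10, so both values round-trip through
 * the single queue number handed to the auxtrace heap.
 */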
119 static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
121 etmidr &= ETMIDR_PTM_VERSION;
123 if (etmidr == ETMIDR_PTM_VERSION)
124 return CS_ETM_PROTO_PTM;
126 return CS_ETM_PROTO_ETMV3;
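/*
 * For illustration, with a hypothetical ETMIDR value of 0x4100f310 the
 * bits [11:8] are 0b0011, so the check above reports CS_ETM_PROTO_PTM;
 * a value such as 0x4100f240 (bits [11:8] = 0b0010) falls through to
 * CS_ETM_PROTO_ETMV3.
 */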
129 static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
131 struct int_node *inode;
134 inode = intlist__find(traceid_list, trace_chan_id);
138 metadata = inode->priv;
139 *magic = metadata[CS_ETM_MAGIC];
143 int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
145 struct int_node *inode;
148 inode = intlist__find(traceid_list, trace_chan_id);
152 metadata = inode->priv;
153 *cpu = (int)metadata[CS_ETM_CPU];
158 * The returned PID format is presented by two bits:
160 * Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
161 * Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
163 * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
164 * are enabled at the same time when the session runs on an EL2 kernel.
165 * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be
166 * recorded in the trace data; in this case the tool will selectively use
167 * CONTEXTIDR_EL2 as the PID.
169 int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
171 struct int_node *inode;
174 inode = intlist__find(traceid_list, trace_chan_id);
178 metadata = inode->priv;
180 if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
181 val = metadata[CS_ETM_ETMCR];
182 /* CONTEXTIDR is traced */
183 if (val & BIT(ETM_OPT_CTXTID))
184 *pid_fmt = BIT(ETM_OPT_CTXTID);
186 val = metadata[CS_ETMV4_TRCCONFIGR];
187 /* CONTEXTIDR_EL2 is traced */
188 if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
189 *pid_fmt = BIT(ETM_OPT_CTXTID2);
190 /* CONTEXTIDR_EL1 is traced */
191 else if (val & BIT(ETM4_CFG_BIT_CTXTID))
192 *pid_fmt = BIT(ETM_OPT_CTXTID);
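/*
 * Summarising the above: a result of BIT(ETM_OPT_CTXTID) means the PID is
 * carried by the CONTEXTIDR/CONTEXTIDR_EL1 trace element, while
 * BIT(ETM_OPT_CTXTID2) means it comes from CONTEXTIDR_EL2, per the
 * selection rule described in the comment above cs_etm__get_pid_fmt().
 */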
198 void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
202 * When a timestamp packet is encountered the backend code
203 * is stopped so that the front end has time to process packets
204 * that were accumulated in the traceID queue. Since there can
205 * be more than one channel per cs_etm_queue, we need to specify
206 * which traceID queue needs servicing.
208 etmq->pending_timestamp_chan_id = trace_chan_id;
211 static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
214 struct cs_etm_packet_queue *packet_queue;
216 if (!etmq->pending_timestamp_chan_id)
220 *trace_chan_id = etmq->pending_timestamp_chan_id;
222 packet_queue = cs_etm__etmq_get_packet_queue(etmq,
223 etmq->pending_timestamp_chan_id);
227 /* Acknowledge pending status */
228 etmq->pending_timestamp_chan_id = 0;
230 /* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
231 return packet_queue->cs_timestamp;
234 static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
240 queue->packet_count = 0;
241 for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
242 queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
243 queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
244 queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
245 queue->packet_buffer[i].instr_count = 0;
246 queue->packet_buffer[i].last_instr_taken_branch = false;
247 queue->packet_buffer[i].last_instr_size = 0;
248 queue->packet_buffer[i].last_instr_type = 0;
249 queue->packet_buffer[i].last_instr_subtype = 0;
250 queue->packet_buffer[i].last_instr_cond = 0;
251 queue->packet_buffer[i].flags = 0;
252 queue->packet_buffer[i].exception_number = UINT32_MAX;
253 queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
254 queue->packet_buffer[i].cpu = INT_MIN;
258 static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
261 struct int_node *inode;
262 struct cs_etm_traceid_queue *tidq;
263 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
265 intlist__for_each_entry(inode, traceid_queues_list) {
266 idx = (int)(intptr_t)inode->priv;
267 tidq = etmq->traceid_queues[idx];
268 cs_etm__clear_packet_queue(&tidq->packet_queue);
272 static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
273 struct cs_etm_traceid_queue *tidq,
277 struct auxtrace_queue *queue;
278 struct cs_etm_auxtrace *etm = etmq->etm;
280 cs_etm__clear_packet_queue(&tidq->packet_queue);
282 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
283 tidq->tid = queue->tid;
285 tidq->trace_chan_id = trace_chan_id;
287 tidq->packet = zalloc(sizeof(struct cs_etm_packet));
291 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
292 if (!tidq->prev_packet)
295 if (etm->synth_opts.last_branch) {
296 size_t sz = sizeof(struct branch_stack);
298 sz += etm->synth_opts.last_branch_sz *
299 sizeof(struct branch_entry);
300 tidq->last_branch = zalloc(sz);
301 if (!tidq->last_branch)
303 tidq->last_branch_rb = zalloc(sz);
304 if (!tidq->last_branch_rb)
308 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
309 if (!tidq->event_buf)
315 zfree(&tidq->last_branch_rb);
316 zfree(&tidq->last_branch);
317 zfree(&tidq->prev_packet);
318 zfree(&tidq->packet);
323 static struct cs_etm_traceid_queue
324 *cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
327 struct int_node *inode;
328 struct intlist *traceid_queues_list;
329 struct cs_etm_traceid_queue *tidq, **traceid_queues;
330 struct cs_etm_auxtrace *etm = etmq->etm;
332 if (etm->timeless_decoding)
333 trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
335 traceid_queues_list = etmq->traceid_queues_list;
338 * Check if a traceid_queue exists for this traceID by looking
341 inode = intlist__find(traceid_queues_list, trace_chan_id);
343 idx = (int)(intptr_t)inode->priv;
344 return etmq->traceid_queues[idx];
347 /* We couldn't find a traceid_queue for this traceID, allocate one */
348 tidq = malloc(sizeof(*tidq));
352 memset(tidq, 0, sizeof(*tidq));
354 /* Get a valid index for the new traceid_queue */
355 idx = intlist__nr_entries(traceid_queues_list);
356 /* Memory for the inode is freed in cs_etm__free_traceid_queues() */
357 inode = intlist__findnew(traceid_queues_list, trace_chan_id);
361 /* Associate this traceID with this index */
362 inode->priv = (void *)(intptr_t)idx;
364 if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
367 /* Grow the traceid_queues array by one unit */
368 traceid_queues = etmq->traceid_queues;
369 traceid_queues = reallocarray(traceid_queues,
371 sizeof(*traceid_queues));
374 * On failure reallocarray() returns NULL and the original block of
375 * memory is left untouched.
380 traceid_queues[idx] = tidq;
381 etmq->traceid_queues = traceid_queues;
383 return etmq->traceid_queues[idx];
387 * Function intlist__remove() removes the inode from the list
388 * and deletes the memory associated with it.
390 intlist__remove(traceid_queues_list, inode);
396 struct cs_etm_packet_queue
397 *cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
399 struct cs_etm_traceid_queue *tidq;
401 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
403 return &tidq->packet_queue;
408 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
409 struct cs_etm_traceid_queue *tidq)
411 struct cs_etm_packet *tmp;
413 if (etm->sample_branches || etm->synth_opts.last_branch ||
414 etm->sample_instructions) {
416 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
417 * the next incoming packet.
420 tidq->packet = tidq->prev_packet;
421 tidq->prev_packet = tmp;
425 static void cs_etm__packet_dump(const char *pkt_string)
427 const char *color = PERF_COLOR_BLUE;
428 int len = strlen(pkt_string);
430 if (len && (pkt_string[len-1] == '\n'))
431 color_fprintf(stdout, color, " %s", pkt_string);
433 color_fprintf(stdout, color, " %s\n", pkt_string);
438 static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
439 struct cs_etm_auxtrace *etm, int idx,
442 u64 **metadata = etm->metadata;
444 t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
445 t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
446 t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
449 static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
450 struct cs_etm_auxtrace *etm, int idx)
452 u64 **metadata = etm->metadata;
454 t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
455 t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
456 t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
457 t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
458 t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
459 t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
460 t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
463 static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
464 struct cs_etm_auxtrace *etm,
471 for (i = 0; i < decoders; i++) {
472 architecture = etm->metadata[i][CS_ETM_MAGIC];
474 switch (architecture) {
475 case __perf_cs_etmv3_magic:
476 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
477 cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
479 case __perf_cs_etmv4_magic:
480 cs_etm__set_trace_param_etmv4(t_params, etm, i);
490 static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
491 struct cs_etm_queue *etmq,
492 enum cs_etm_decoder_operation mode,
497 if (!(mode < CS_ETM_OPERATION_MAX))
500 d_params->packet_printer = cs_etm__packet_dump;
501 d_params->operation = mode;
502 d_params->data = etmq;
503 d_params->formatted = formatted;
504 d_params->fsyncs = false;
505 d_params->hsyncs = false;
506 d_params->frame_aligned = true;
513 static void cs_etm__dump_event(struct cs_etm_queue *etmq,
514 struct auxtrace_buffer *buffer)
517 const char *color = PERF_COLOR_BLUE;
518 size_t buffer_used = 0;
520 fprintf(stdout, "\n");
521 color_fprintf(stdout, color,
522 ". ... CoreSight ETM Trace data: size %zu bytes\n",
528 ret = cs_etm_decoder__process_data_block(
529 etmq->decoder, buffer->offset,
530 &((u8 *)buffer->data)[buffer_used],
531 buffer->size - buffer_used, &consumed);
535 buffer_used += consumed;
536 } while (buffer_used < buffer->size);
538 cs_etm_decoder__reset(etmq->decoder);
541 static int cs_etm__flush_events(struct perf_session *session,
542 struct perf_tool *tool)
544 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
545 struct cs_etm_auxtrace,
550 if (!tool->ordered_events)
553 if (etm->timeless_decoding)
554 return cs_etm__process_timeless_queues(etm, -1);
556 return cs_etm__process_queues(etm);
559 static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
563 struct int_node *inode, *tmp;
564 struct cs_etm_traceid_queue *tidq;
565 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
567 intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
568 priv = (uintptr_t)inode->priv;
571 /* Free this traceid_queue from the array */
572 tidq = etmq->traceid_queues[idx];
573 thread__zput(tidq->thread);
574 zfree(&tidq->event_buf);
575 zfree(&tidq->last_branch);
576 zfree(&tidq->last_branch_rb);
577 zfree(&tidq->prev_packet);
578 zfree(&tidq->packet);
582 * Function intlist__remove() removes the inode from the list
583 * and deletes the memory associated with it.
585 intlist__remove(traceid_queues_list, inode);
588 /* Then the RB tree itself */
589 intlist__delete(traceid_queues_list);
590 etmq->traceid_queues_list = NULL;
592 /* finally free the traceid_queues array */
593 zfree(&etmq->traceid_queues);
596 static void cs_etm__free_queue(void *priv)
598 struct cs_etm_queue *etmq = priv;
603 cs_etm_decoder__free(etmq->decoder);
604 cs_etm__free_traceid_queues(etmq);
608 static void cs_etm__free_events(struct perf_session *session)
611 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
612 struct cs_etm_auxtrace,
614 struct auxtrace_queues *queues = &aux->queues;
616 for (i = 0; i < queues->nr_queues; i++) {
617 cs_etm__free_queue(queues->queue_array[i].priv);
618 queues->queue_array[i].priv = NULL;
621 auxtrace_queues__free(queues);
624 static void cs_etm__free(struct perf_session *session)
627 struct int_node *inode, *tmp;
628 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
629 struct cs_etm_auxtrace,
631 cs_etm__free_events(session);
632 session->auxtrace = NULL;
634 /* First remove all traceID/metadata nodes from the RB tree */
635 intlist__for_each_entry_safe(inode, tmp, traceid_list)
636 intlist__remove(traceid_list, inode);
637 /* Then the RB tree itself */
638 intlist__delete(traceid_list);
640 for (i = 0; i < aux->num_cpu; i++)
641 zfree(&aux->metadata[i]);
643 thread__zput(aux->unknown_thread);
644 zfree(&aux->metadata);
648 static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
651 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
652 struct cs_etm_auxtrace,
655 return evsel->core.attr.type == aux->pmu_type;
658 static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
660 struct machine *machine;
662 machine = etmq->etm->machine;
664 if (address >= machine__kernel_start(machine)) {
665 if (machine__is_host(machine))
666 return PERF_RECORD_MISC_KERNEL;
668 return PERF_RECORD_MISC_GUEST_KERNEL;
670 if (machine__is_host(machine))
671 return PERF_RECORD_MISC_USER;
673 return PERF_RECORD_MISC_GUEST_USER;
675 return PERF_RECORD_MISC_HYPERVISOR;
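/*
 * In the common host-side case this boils down to: addresses at or above
 * machine__kernel_start() are tagged PERF_RECORD_MISC_KERNEL and anything
 * below it PERF_RECORD_MISC_USER; the guest and hypervisor values only
 * come into play when the trace was taken from a guest.
 */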
679 static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
680 u64 address, size_t size, u8 *buffer)
685 struct thread *thread;
686 struct machine *machine;
687 struct addr_location al;
688 struct cs_etm_traceid_queue *tidq;
693 machine = etmq->etm->machine;
694 cpumode = cs_etm__cpu_mode(etmq, address);
695 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
699 thread = tidq->thread;
701 if (cpumode != PERF_RECORD_MISC_KERNEL)
703 thread = etmq->etm->unknown_thread;
706 if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
709 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
710 dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
713 offset = al.map->map_ip(al.map, address);
717 len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
720 ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
721 " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
722 if (!al.map->dso->auxtrace_warned) {
723 pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
725 al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
726 al.map->dso->auxtrace_warned = true;
734 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
737 struct cs_etm_decoder_params d_params;
738 struct cs_etm_trace_params *t_params = NULL;
739 struct cs_etm_queue *etmq;
741 * Each queue can only contain data from one CPU when unformatted, so only one decoder is
744 int decoders = formatted ? etm->num_cpu : 1;
746 etmq = zalloc(sizeof(*etmq));
750 etmq->traceid_queues_list = intlist__new(NULL);
751 if (!etmq->traceid_queues_list)
754 /* Use metadata to fill in trace parameters for trace decoder */
755 t_params = zalloc(sizeof(*t_params) * decoders);
760 if (cs_etm__init_trace_params(t_params, etm, decoders))
763 /* Set decoder parameters to decode trace packets */
764 if (cs_etm__init_decoder_params(&d_params, etmq,
765 dump_trace ? CS_ETM_OPERATION_PRINT :
766 CS_ETM_OPERATION_DECODE,
770 etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
777 * Register a function to handle all memory accesses required by
778 * the trace decoder library.
780 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
783 goto out_free_decoder;
789 cs_etm_decoder__free(etmq->decoder);
791 intlist__delete(etmq->traceid_queues_list);
797 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
798 struct auxtrace_queue *queue,
799 unsigned int queue_nr,
802 struct cs_etm_queue *etmq = queue->priv;
804 if (list_empty(&queue->head) || etmq)
807 etmq = cs_etm__alloc_queue(etm, formatted);
814 etmq->queue_nr = queue_nr;
820 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
821 struct cs_etm_queue *etmq,
822 unsigned int queue_nr)
825 unsigned int cs_queue_nr;
830 * We are under a CPU-wide trace scenario. As such we need to know
831 * when the code that generated the traces started to execute so that
832 * it can be correlated with execution on other CPUs. So we get a
833 * handle on the beginning of traces and decode until we find a
834 * timestamp. The timestamp is then added to the auxtrace min heap
835 * in order to know which queue (of all the etmqs) to decode first.
839 * Fetch an aux_buffer from this etmq. Bail if no more
840 * blocks or an error has been encountered.
842 ret = cs_etm__get_data_block(etmq);
847 * Run decoder on the trace block. The decoder will stop when
848 * encountering a CS timestamp, a full packet queue or the end of
849 * trace for that block.
851 ret = cs_etm__decode_data_block(etmq);
856 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
857 * the timestamp calculation for us.
859 cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
861 /* We found a timestamp, no need to continue. */
866 * We didn't find a timestamp so empty all the traceid packet
867 * queues before looking for another timestamp packet, either
868 * in the current data block or a new one. Packets that were
869 * just decoded are useless since no timestamp has been
870 * associated with them. As such simply discard them.
872 cs_etm__clear_all_packet_queues(etmq);
876 * We have a timestamp. Add it to the min heap to reflect when
877 * instructions conveyed by the range packets of this traceID queue
878 * started to execute. Once the same has been done for all the traceID
879 * queues of each etmq, rendering and decoding can start in
880 * chronological order.
882 * Note that packets decoded above are still in the traceID's packet
883 * queue and will be processed in cs_etm__process_queues().
885 cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
886 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
892 void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
893 struct cs_etm_traceid_queue *tidq)
895 struct branch_stack *bs_src = tidq->last_branch_rb;
896 struct branch_stack *bs_dst = tidq->last_branch;
900 * Set the number of records before early exit: ->nr is used to
901 * determine how many branches to copy from ->entries.
903 bs_dst->nr = bs_src->nr;
906 * Early exit when there is nothing to copy.
912 * As bs_src->entries is a circular buffer, we need to copy from it in
913 * two steps. First, copy the branches from the most recently inserted
914 * branch ->last_branch_pos until the end of bs_src->entries buffer.
916 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
917 memcpy(&bs_dst->entries[0],
918 &bs_src->entries[tidq->last_branch_pos],
919 sizeof(struct branch_entry) * nr);
922 * If we wrapped around at least once, the branches from the beginning
923 * of the bs_src->entries buffer up to the ->last_branch_pos element
924 * are older valid branches: copy them over. The total number of
925 * branches copied over will be equal to the number of branches requested by
926 * the user in last_branch_sz.
928 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
929 memcpy(&bs_dst->entries[nr],
931 sizeof(struct branch_entry) * tidq->last_branch_pos);
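/*
 * Worked example with hypothetical numbers: for last_branch_sz = 4 and
 * last_branch_pos = 1, the first memcpy above moves three entries
 * (bs_src->entries[1..3]) into bs_dst->entries[0..2]; if the ring has
 * wrapped (bs_src->nr >= 4), the second memcpy then moves the remaining
 * entry bs_src->entries[0] into bs_dst->entries[3], giving a linear,
 * most-recent-first copy of the whole stack.
 */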
936 void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
938 tidq->last_branch_pos = 0;
939 tidq->last_branch_rb->nr = 0;
942 static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
943 u8 trace_chan_id, u64 addr)
947 cs_etm__mem_access(etmq, trace_chan_id, addr,
948 ARRAY_SIZE(instrBytes), instrBytes);
950 * T32 instruction size is indicated by bits[15:11] of the first
951 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
952 * denote a 32-bit instruction.
954 return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
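/*
 * For example (hypothetical halfword): if the two bytes read above are
 * 0x01 0xF0 (little-endian halfword 0xF001), instrBytes[1] is 0xF0 and
 * 0xF0 & 0xF8 = 0xF0 >= 0xE8, i.e. bits [15:11] are 0b11110, so this is
 * the first half of a 32-bit T32 encoding and 4 is returned; a halfword
 * such as 0xB580 (instrBytes[1] = 0xB5) yields 2.
 */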
957 static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
959 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
960 if (packet->sample_type == CS_ETM_DISCONTINUITY)
963 return packet->start_addr;
967 u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
969 /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
970 if (packet->sample_type == CS_ETM_DISCONTINUITY)
973 return packet->end_addr - packet->last_instr_size;
976 static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
978 const struct cs_etm_packet *packet,
981 if (packet->isa == CS_ETM_ISA_T32) {
982 u64 addr = packet->start_addr;
985 addr += cs_etm__t32_instr_size(etmq,
986 trace_chan_id, addr);
992 /* Assume a 4 byte instruction size (A32/A64) */
993 return packet->start_addr + offset * 4;
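/*
 * Example with made-up values: for an A64 range packet starting at
 * 0x400000, cs_etm__instr_addr() with offset = 3 simply returns
 * 0x400000 + 3 * 4 = 0x40000c; for T32 it instead walks the packet
 * instruction by instruction, adding 2 or 4 bytes per step as reported
 * by cs_etm__t32_instr_size().
 */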
996 static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
997 struct cs_etm_traceid_queue *tidq)
999 struct branch_stack *bs = tidq->last_branch_rb;
1000 struct branch_entry *be;
1003 * The branches are recorded in a circular buffer in reverse
1004 * chronological order: we start recording from the last element of the
1005 * buffer down. After writing the first element of the stack, move the
1006 * insert position back to the end of the buffer.
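/*
 * For instance, with last_branch_sz = 4 the first record lands in
 * entries[3] (pos wraps from 0 to 4, then is decremented), the next in
 * entries[2], and so on; entries[tidq->last_branch_pos] therefore always
 * holds the most recent branch.
 */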
1008 if (!tidq->last_branch_pos)
1009 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1011 tidq->last_branch_pos -= 1;
1013 be = &bs->entries[tidq->last_branch_pos];
1014 be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1015 be->to = cs_etm__first_executed_instr(tidq->packet);
1016 /* No support for mispredict */
1017 be->flags.mispred = 0;
1018 be->flags.predicted = 1;
1021 * Increment bs->nr until it reaches the number of last branches requested by
1022 * the user on the command line.
1024 if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
1028 static int cs_etm__inject_event(union perf_event *event,
1029 struct perf_sample *sample, u64 type)
1031 event->header.size = perf_event__sample_event_size(sample, type, 0);
1032 return perf_event__synthesize_sample(event, type, 0, sample);
1037 cs_etm__get_trace(struct cs_etm_queue *etmq)
1039 struct auxtrace_buffer *aux_buffer = etmq->buffer;
1040 struct auxtrace_buffer *old_buffer = aux_buffer;
1041 struct auxtrace_queue *queue;
1043 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1045 aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
1047 /* If no more data, drop the previous auxtrace_buffer and return */
1050 auxtrace_buffer__drop_data(old_buffer);
1055 etmq->buffer = aux_buffer;
1057 /* If the aux_buffer doesn't have data associated, try to load it */
1058 if (!aux_buffer->data) {
1059 /* get the file desc associated with the perf data file */
1060 int fd = perf_data__fd(etmq->etm->session->data);
1062 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
1063 if (!aux_buffer->data)
1067 /* If valid, drop the previous buffer */
1069 auxtrace_buffer__drop_data(old_buffer);
1072 etmq->buf_len = aux_buffer->size;
1073 etmq->buf = aux_buffer->data;
1075 return etmq->buf_len;
1078 static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
1079 struct cs_etm_traceid_queue *tidq)
1081 if ((!tidq->thread) && (tidq->tid != -1))
1082 tidq->thread = machine__find_thread(etm->machine, -1,
1086 tidq->pid = tidq->thread->pid_;
1089 int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
1090 pid_t tid, u8 trace_chan_id)
1092 int cpu, err = -EINVAL;
1093 struct cs_etm_auxtrace *etm = etmq->etm;
1094 struct cs_etm_traceid_queue *tidq;
1096 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1100 if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
1103 err = machine__set_current_tid(etm->machine, cpu, tid, tid);
1108 thread__zput(tidq->thread);
1110 cs_etm__set_pid_tid_cpu(etm, tidq);
1114 bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
1116 return !!etmq->etm->timeless_decoding;
1119 static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
1121 const struct cs_etm_packet *packet,
1122 struct perf_sample *sample)
1125 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
1126 * packet, so directly bail out with 'insn_len' = 0.
1128 if (packet->sample_type == CS_ETM_DISCONTINUITY) {
1129 sample->insn_len = 0;
1134 * T32 instruction size might be 32-bit or 16-bit, decide by calling
1135 * cs_etm__t32_instr_size().
1137 if (packet->isa == CS_ETM_ISA_T32)
1138 sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
1140 /* Otherwise, A64 and A32 instruction sizes are always 32-bit. */
1142 sample->insn_len = 4;
1144 cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
1145 sample->insn_len, (void *)sample->insn);
1148 static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
1149 struct cs_etm_traceid_queue *tidq,
1150 u64 addr, u64 period)
1153 struct cs_etm_auxtrace *etm = etmq->etm;
1154 union perf_event *event = tidq->event_buf;
1155 struct perf_sample sample = {.ip = 0,};
1157 event->sample.header.type = PERF_RECORD_SAMPLE;
1158 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
1159 event->sample.header.size = sizeof(struct perf_event_header);
1161 if (!etm->timeless_decoding)
1162 sample.time = etm->latest_kernel_timestamp;
1164 sample.pid = tidq->pid;
1165 sample.tid = tidq->tid;
1166 sample.id = etmq->etm->instructions_id;
1167 sample.stream_id = etmq->etm->instructions_id;
1168 sample.period = period;
1169 sample.cpu = tidq->packet->cpu;
1170 sample.flags = tidq->prev_packet->flags;
1171 sample.cpumode = event->sample.header.misc;
1173 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1175 if (etm->synth_opts.last_branch)
1176 sample.branch_stack = tidq->last_branch;
1178 if (etm->synth_opts.inject) {
1179 ret = cs_etm__inject_event(event, &sample,
1180 etm->instructions_sample_type);
1185 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1189 "CS ETM Trace: failed to deliver instruction event, error %d\n",
1196 * The cs etm packet encodes an instruction range between a branch target
1197 * and the next taken branch. Generate sample accordingly.
1199 static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
1200 struct cs_etm_traceid_queue *tidq)
1203 struct cs_etm_auxtrace *etm = etmq->etm;
1204 struct perf_sample sample = {.ip = 0,};
1205 union perf_event *event = tidq->event_buf;
1206 struct dummy_branch_stack {
1209 struct branch_entry entries;
1213 ip = cs_etm__last_executed_instr(tidq->prev_packet);
1215 event->sample.header.type = PERF_RECORD_SAMPLE;
1216 event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
1217 event->sample.header.size = sizeof(struct perf_event_header);
1219 if (!etm->timeless_decoding)
1220 sample.time = etm->latest_kernel_timestamp;
1222 sample.pid = tidq->pid;
1223 sample.tid = tidq->tid;
1224 sample.addr = cs_etm__first_executed_instr(tidq->packet);
1225 sample.id = etmq->etm->branches_id;
1226 sample.stream_id = etmq->etm->branches_id;
1228 sample.cpu = tidq->packet->cpu;
1229 sample.flags = tidq->prev_packet->flags;
1230 sample.cpumode = event->sample.header.misc;
1232 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1236 * perf report cannot handle events without a branch stack
1238 if (etm->synth_opts.last_branch) {
1239 dummy_bs = (struct dummy_branch_stack){
1247 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1250 if (etm->synth_opts.inject) {
1251 ret = cs_etm__inject_event(event, &sample,
1252 etm->branches_sample_type);
1257 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1261 "CS ETM Trace: failed to deliver instruction event, error %d\n",
1267 struct cs_etm_synth {
1268 struct perf_tool dummy_tool;
1269 struct perf_session *session;
1272 static int cs_etm__event_synth(struct perf_tool *tool,
1273 union perf_event *event,
1274 struct perf_sample *sample __maybe_unused,
1275 struct machine *machine __maybe_unused)
1277 struct cs_etm_synth *cs_etm_synth =
1278 container_of(tool, struct cs_etm_synth, dummy_tool);
1280 return perf_session__deliver_synth_event(cs_etm_synth->session,
1284 static int cs_etm__synth_event(struct perf_session *session,
1285 struct perf_event_attr *attr, u64 id)
1287 struct cs_etm_synth cs_etm_synth;
1289 memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
1290 cs_etm_synth.session = session;
1292 return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
1293 &id, cs_etm__event_synth);
1296 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1297 struct perf_session *session)
1299 struct evlist *evlist = session->evlist;
1300 struct evsel *evsel;
1301 struct perf_event_attr attr;
1306 evlist__for_each_entry(evlist, evsel) {
1307 if (evsel->core.attr.type == etm->pmu_type) {
1314 pr_debug("No selected events with CoreSight Trace data\n");
1318 memset(&attr, 0, sizeof(struct perf_event_attr));
1319 attr.size = sizeof(struct perf_event_attr);
1320 attr.type = PERF_TYPE_HARDWARE;
1321 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
1322 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1324 if (etm->timeless_decoding)
1325 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1327 attr.sample_type |= PERF_SAMPLE_TIME;
1329 attr.exclude_user = evsel->core.attr.exclude_user;
1330 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1331 attr.exclude_hv = evsel->core.attr.exclude_hv;
1332 attr.exclude_host = evsel->core.attr.exclude_host;
1333 attr.exclude_guest = evsel->core.attr.exclude_guest;
1334 attr.sample_id_all = evsel->core.attr.sample_id_all;
1335 attr.read_format = evsel->core.attr.read_format;
1337 /* create new id val to be a fixed offset from evsel id */
1338 id = evsel->core.id[0] + 1000000000;
1343 if (etm->synth_opts.branches) {
1344 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
1345 attr.sample_period = 1;
1346 attr.sample_type |= PERF_SAMPLE_ADDR;
1347 err = cs_etm__synth_event(session, &attr, id);
1350 etm->sample_branches = true;
1351 etm->branches_sample_type = attr.sample_type;
1352 etm->branches_id = id;
1354 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
1357 if (etm->synth_opts.last_branch) {
1358 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1360 * We don't use the hardware index, but the sample generation
1361 * code uses the new format branch_stack with this field,
1362 * so the event attributes must indicate that it's present.
1364 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
1367 if (etm->synth_opts.instructions) {
1368 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1369 attr.sample_period = etm->synth_opts.period;
1370 etm->instructions_sample_period = attr.sample_period;
1371 err = cs_etm__synth_event(session, &attr, id);
1374 etm->sample_instructions = true;
1375 etm->instructions_sample_type = attr.sample_type;
1376 etm->instructions_id = id;
1383 static int cs_etm__sample(struct cs_etm_queue *etmq,
1384 struct cs_etm_traceid_queue *tidq)
1386 struct cs_etm_auxtrace *etm = etmq->etm;
1388 u8 trace_chan_id = tidq->trace_chan_id;
1391 /* Get instructions remainder from previous packet */
1392 instrs_prev = tidq->period_instructions;
1394 tidq->period_instructions += tidq->packet->instr_count;
1397 * Record a branch when the last instruction in
1398 * PREV_PACKET is a branch.
1400 if (etm->synth_opts.last_branch &&
1401 tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1402 tidq->prev_packet->last_instr_taken_branch)
1403 cs_etm__update_last_branch_rb(etmq, tidq);
1405 if (etm->sample_instructions &&
1406 tidq->period_instructions >= etm->instructions_sample_period) {
1408 * Emit instruction sample periodically
1409 * TODO: allow period to be defined in cycles and clock time
1413 * Below diagram demonstrates the instruction samples
1416 * Instrs Instrs Instrs Instrs
1417 * Sample(n) Sample(n+1) Sample(n+2) Sample(n+3)
1420 * --------------------------------------------------
1424 * instructions(Pi) instructions(Pi')
1427 * \---------------- -----------------/
1429 * tidq->packet->instr_count
1431 * Instrs Sample(n...) are the synthesised samples occurring
1432 * every etm->instructions_sample_period instructions - as
1433 * defined on the perf command line. Sample(n) is being the
1434 * last sample before the current etm packet, n+1 to n+3
1435 * samples are generated from the current etm packet.
1437 * tidq->packet->instr_count represents the number of
1438 * instructions in the current etm packet.
1440 * Period instructions (Pi) contains the number of
1441 * instructions executed after the sample point(n) from the
1442 * previous etm packet. This will always be less than
1443 * etm->instructions_sample_period.
1445 * When generating new samples, sample(n+1) combines instructions
1446 * from two parts: the tail of the old packet and the head of the
1447 * newly arrived packet; sample(n+2) and sample(n+3) each consume a
1448 * full sample period of instructions. After sample(n+3), the
1449 * remaining instructions will be used by a later packet and are
1450 * carried over in tidq->period_instructions for the next round of
1451 * calculation.
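/*
 * Putting hypothetical numbers on it: with an instruction sample period
 * of 10000, instrs_prev = 2000 left over from the previous packet and a
 * new packet of 25000 instructions, tidq->period_instructions becomes
 * 27000. The first sample lands on the 8000th instruction of the new
 * packet (offset 10000 - 2000), the second on the 18000th, and the
 * remaining 7000 instructions are carried in tidq->period_instructions
 * for the next packet.
 */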
1455 * Get the initial offset into the current packet instructions;
1456 * entry conditions ensure that instrs_prev is less than
1457 * etm->instructions_sample_period.
1459 u64 offset = etm->instructions_sample_period - instrs_prev;
1462 /* Prepare last branches for instruction sample */
1463 if (etm->synth_opts.last_branch)
1464 cs_etm__copy_last_branch_rb(etmq, tidq);
1466 while (tidq->period_instructions >=
1467 etm->instructions_sample_period) {
1469 * Calculate the address of the sampled instruction (-1
1470 * as sample is reported as though instruction has just
1471 * been executed, but PC has not advanced to next
1474 addr = cs_etm__instr_addr(etmq, trace_chan_id,
1475 tidq->packet, offset - 1);
1476 ret = cs_etm__synth_instruction_sample(
1478 etm->instructions_sample_period);
1482 offset += etm->instructions_sample_period;
1483 tidq->period_instructions -=
1484 etm->instructions_sample_period;
1488 if (etm->sample_branches) {
1489 bool generate_sample = false;
1491 /* Generate a sample when tracing restarts after a discontinuity (trace on) */
1492 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1493 generate_sample = true;
1495 /* Generate sample for branch taken packet */
1496 if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1497 tidq->prev_packet->last_instr_taken_branch)
1498 generate_sample = true;
1500 if (generate_sample) {
1501 ret = cs_etm__synth_branch_sample(etmq, tidq);
1507 cs_etm__packet_swap(etm, tidq);
1512 static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1515 * When an exception packet is inserted, force
1516 * 'prev_packet->last_instr_taken_branch' to true, regardless of whether
1517 * the last instruction in the previous range packet was a taken branch.
1518 * This ensures a branch sample is generated for the instruction range
1519 * executed before the exception is trapped to the kernel or before the
1521 * exception returns. The exception packet contains dummy address
1522 * values, so don't swap PACKET with PREV_PACKET; this keeps PREV_PACKET
1523 * useful for generating instruction and branch samples.
1525 if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1526 tidq->prev_packet->last_instr_taken_branch = true;
1531 static int cs_etm__flush(struct cs_etm_queue *etmq,
1532 struct cs_etm_traceid_queue *tidq)
1535 struct cs_etm_auxtrace *etm = etmq->etm;
1537 /* Handle start tracing packet */
1538 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1541 if (etmq->etm->synth_opts.last_branch &&
1542 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1545 /* Prepare last branches for instruction sample */
1546 cs_etm__copy_last_branch_rb(etmq, tidq);
1549 * Generate a last branch event for the branches left in the
1550 * circular buffer at the end of the trace.
1552 * Use the address of the end of the last reported execution
1555 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1557 err = cs_etm__synth_instruction_sample(
1559 tidq->period_instructions);
1563 tidq->period_instructions = 0;
1567 if (etm->sample_branches &&
1568 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1569 err = cs_etm__synth_branch_sample(etmq, tidq);
1575 cs_etm__packet_swap(etm, tidq);
1577 /* Reset last branches after flushing the trace */
1578 if (etm->synth_opts.last_branch)
1579 cs_etm__reset_last_branch_rb(tidq);
1584 static int cs_etm__end_block(struct cs_etm_queue *etmq,
1585 struct cs_etm_traceid_queue *tidq)
1590 * No new packet is coming and 'tidq->packet' contains the stale
1591 * packet that was set during the previous packet swap;
1592 * so skip generating a branch sample to avoid using the stale packet.
1594 * In this case, only flush the branch stack and generate a last branch
1595 * event for the branches left in the circular buffer at the end of
1598 if (etmq->etm->synth_opts.last_branch &&
1599 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1602 /* Prepare last branches for instruction sample */
1603 cs_etm__copy_last_branch_rb(etmq, tidq);
1606 * Use the address of the end of the last reported execution
1609 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1611 err = cs_etm__synth_instruction_sample(
1613 tidq->period_instructions);
1617 tidq->period_instructions = 0;
1623 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1625 * Returns: < 0 if error
1626 * = 0 if no more auxtrace_buffer to read
1627 * > 0 if the current buffer isn't empty yet
1629 static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1633 if (!etmq->buf_len) {
1634 ret = cs_etm__get_trace(etmq);
1638 * We cannot assume consecutive blocks in the data file
1639 * are contiguous; reset the decoder to force re-sync.
1641 ret = cs_etm_decoder__reset(etmq->decoder);
1646 return etmq->buf_len;
1649 static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
1650 struct cs_etm_packet *packet,
1653 /* Initialise to keep compiler happy */
1658 switch (packet->isa) {
1659 case CS_ETM_ISA_T32:
1661 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1664 * +-----------------+--------+
1665 * | 1 1 0 1 1 1 1 1 | imm8 |
1666 * +-----------------+--------+
1668 * According to the specification, SVC is only defined for T32
1669 * as a 16-bit instruction and has no 32-bit encoding;
1670 * so below only read 2 bytes as the instruction size for T32.
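/*
 * e.g. a halfword of 0xDF05 is SVC #5: (0xDF05 & 0xFF00) == 0xDF00, so
 * the check below recognises it (the immediate value is irrelevant here).
 */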
1672 addr = end_addr - 2;
1673 cs_etm__mem_access(etmq, trace_chan_id, addr,
1674 sizeof(instr16), (u8 *)&instr16);
1675 if ((instr16 & 0xFF00) == 0xDF00)
1679 case CS_ETM_ISA_A32:
1681 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1683 * b'31 b'28 b'27 b'24
1684 * +---------+---------+-------------------------+
1685 * | !1111 | 1 1 1 1 | imm24 |
1686 * +---------+---------+-------------------------+
1688 addr = end_addr - 4;
1689 cs_etm__mem_access(etmq, trace_chan_id, addr,
1690 sizeof(instr32), (u8 *)&instr32);
1691 if ((instr32 & 0x0F000000) == 0x0F000000 &&
1692 (instr32 & 0xF0000000) != 0xF0000000)
1696 case CS_ETM_ISA_A64:
1698 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1701 * +-----------------------+---------+-----------+
1702 * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
1703 * +-----------------------+---------+-----------+
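/*
 * e.g. SVC #3 encodes as 0xD4000061; masking with 0xFFE0001F clears the
 * imm16 field and leaves 0xD4000001, which is what the check below
 * compares against.
 */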
1705 addr = end_addr - 4;
1706 cs_etm__mem_access(etmq, trace_chan_id, addr,
1707 sizeof(instr32), (u8 *)&instr32);
1708 if ((instr32 & 0xFFE0001F) == 0xd4000001)
1712 case CS_ETM_ISA_UNKNOWN:
1720 static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1721 struct cs_etm_traceid_queue *tidq, u64 magic)
1723 u8 trace_chan_id = tidq->trace_chan_id;
1724 struct cs_etm_packet *packet = tidq->packet;
1725 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1727 if (magic == __perf_cs_etmv3_magic)
1728 if (packet->exception_number == CS_ETMV3_EXC_SVC)
1732 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
1733 * HVC cases; need to check if it's SVC instruction based on
1736 if (magic == __perf_cs_etmv4_magic) {
1737 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1738 cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1739 prev_packet->end_addr))
1746 static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1749 struct cs_etm_packet *packet = tidq->packet;
1751 if (magic == __perf_cs_etmv3_magic)
1752 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1753 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1754 packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1755 packet->exception_number == CS_ETMV3_EXC_IRQ ||
1756 packet->exception_number == CS_ETMV3_EXC_FIQ)
1759 if (magic == __perf_cs_etmv4_magic)
1760 if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1761 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1762 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1763 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1764 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1765 packet->exception_number == CS_ETMV4_EXC_IRQ ||
1766 packet->exception_number == CS_ETMV4_EXC_FIQ)
1772 static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
1773 struct cs_etm_traceid_queue *tidq,
1776 u8 trace_chan_id = tidq->trace_chan_id;
1777 struct cs_etm_packet *packet = tidq->packet;
1778 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1780 if (magic == __perf_cs_etmv3_magic)
1781 if (packet->exception_number == CS_ETMV3_EXC_SMC ||
1782 packet->exception_number == CS_ETMV3_EXC_HYP ||
1783 packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
1784 packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
1785 packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
1786 packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
1787 packet->exception_number == CS_ETMV3_EXC_GENERIC)
1790 if (magic == __perf_cs_etmv4_magic) {
1791 if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
1792 packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
1793 packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
1794 packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
1798 * For CS_ETMV4_EXC_CALL, instructions other than SVC
1799 * (i.e. SMC, HVC) are treated as synchronous exceptions.
1801 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1802 !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1803 prev_packet->end_addr))
1807 * ETMv4 has 5 bits for exception number; if the numbers
1808 * are in the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END],
1809 * they are implementation-defined exceptions.
1811 * In this case, simply treat them as synchronous exceptions.
1813 if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
1814 packet->exception_number <= CS_ETMV4_EXC_END)
1821 static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
1822 struct cs_etm_traceid_queue *tidq)
1824 struct cs_etm_packet *packet = tidq->packet;
1825 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1826 u8 trace_chan_id = tidq->trace_chan_id;
1830 switch (packet->sample_type) {
1833 * An immediate branch instruction with neither a link nor a
1834 * return flag is a normal branch instruction within
1837 if (packet->last_instr_type == OCSD_INSTR_BR &&
1838 packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
1839 packet->flags = PERF_IP_FLAG_BRANCH;
1841 if (packet->last_instr_cond)
1842 packet->flags |= PERF_IP_FLAG_CONDITIONAL;
1846 * An immediate branch instruction with link (e.g. BL); this is a
1847 * branch instruction for a function call.
1849 if (packet->last_instr_type == OCSD_INSTR_BR &&
1850 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1851 packet->flags = PERF_IP_FLAG_BRANCH |
1855 * Indirect branch instruction with link (e.g. BLR), this is
1856 * branch instruction for function call.
1858 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1859 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1860 packet->flags = PERF_IP_FLAG_BRANCH |
1864 * Indirect branch instruction with subtype of
1865 * OCSD_S_INSTR_V7_IMPLIED_RET, this is explicit hint for
1866 * function return for A32/T32.
1868 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1869 packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
1870 packet->flags = PERF_IP_FLAG_BRANCH |
1871 PERF_IP_FLAG_RETURN;
1874 * An indirect branch instruction without link (e.g. BR); usually
1875 * this is used for a function return, especially for functions
1876 * within dynamically linked libraries.
1878 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1879 packet->last_instr_subtype == OCSD_S_INSTR_NONE)
1880 packet->flags = PERF_IP_FLAG_BRANCH |
1881 PERF_IP_FLAG_RETURN;
1883 /* Return instruction for function return. */
1884 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1885 packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
1886 packet->flags = PERF_IP_FLAG_BRANCH |
1887 PERF_IP_FLAG_RETURN;
1890 * The decoder might insert a discontinuity in the middle of
1891 * instruction packets; fix up prev_packet with the flag
1892 * PERF_IP_FLAG_TRACE_BEGIN to indicate that tracing restarted.
1894 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1895 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1896 PERF_IP_FLAG_TRACE_BEGIN;
1899 * If the previous packet is an exception return packet
1900 * and the return address immediately follows an SVC instruction,
1901 * recalibrate the previous packet's sample flags
1902 * to PERF_IP_FLAG_SYSCALLRET.
1904 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
1905 PERF_IP_FLAG_RETURN |
1906 PERF_IP_FLAG_INTERRUPT) &&
1907 cs_etm__is_svc_instr(etmq, trace_chan_id,
1908 packet, packet->start_addr))
1909 prev_packet->flags = PERF_IP_FLAG_BRANCH |
1910 PERF_IP_FLAG_RETURN |
1911 PERF_IP_FLAG_SYSCALLRET;
1913 case CS_ETM_DISCONTINUITY:
1915 * The trace is discontinuous; if the previous packet is an
1916 * instruction packet, set the flag PERF_IP_FLAG_TRACE_END
1917 * on the previous packet.
1919 if (prev_packet->sample_type == CS_ETM_RANGE)
1920 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1921 PERF_IP_FLAG_TRACE_END;
1923 case CS_ETM_EXCEPTION:
1924 ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
1928 /* The exception is for system call. */
1929 if (cs_etm__is_syscall(etmq, tidq, magic))
1930 packet->flags = PERF_IP_FLAG_BRANCH |
1932 PERF_IP_FLAG_SYSCALLRET;
1934 * The exceptions are triggered by external signals from bus,
1935 * interrupt controller, debug module, PE reset or halt.
1937 else if (cs_etm__is_async_exception(tidq, magic))
1938 packet->flags = PERF_IP_FLAG_BRANCH |
1940 PERF_IP_FLAG_ASYNC |
1941 PERF_IP_FLAG_INTERRUPT;
1943 * Otherwise, the exception is caused by a trap, an instruction or
1944 * data fault, or an alignment error.
1946 else if (cs_etm__is_sync_exception(etmq, tidq, magic))
1947 packet->flags = PERF_IP_FLAG_BRANCH |
1949 PERF_IP_FLAG_INTERRUPT;
1952 * When the exception packet is inserted, it is not used
1953 * standalone for generating samples but is affiliated with
1954 * the previous instruction range packet; so set the previous
1955 * range packet's flags to tell perf it is a branch taken
1956 * due to an exception.
1958 if (prev_packet->sample_type == CS_ETM_RANGE)
1959 prev_packet->flags = packet->flags;
1961 case CS_ETM_EXCEPTION_RET:
1963 * When the exception return packet is inserted, the exception
1964 * return packet is likewise not used standalone for
1965 * generating samples but is affiliated with the previous
1966 * instruction range packet; so set the previous range packet's
1967 * flags to tell perf it is an exception return branch.
1969 * The exception return can be for either a system call or
1970 * another exception type; unfortunately the packet doesn't
1971 * contain exception type related info, so we cannot decide
1972 * the exception type purely based on the exception return packet.
1973 * If we record the exception number from the exception packet and
1974 * reuse it for the exception return packet, this is not reliable
1975 * because the trace can be discontinuous or the interrupt can
1976 * be nested, so the recorded exception number cannot be
1977 * used for the exception return packet in these two cases.
1979 * For the exception return packet, we only need to distinguish whether
1980 * the packet is for a system call or for other types. Thus the
1981 * decision can be deferred until we receive the next packet, which
1982 * contains the return address; based on the return address we
1983 * can read out the previous instruction and check if it's a
1984 * system call instruction, and then calibrate the sample flag
1987 if (prev_packet->sample_type == CS_ETM_RANGE)
1988 prev_packet->flags = PERF_IP_FLAG_BRANCH |
1989 PERF_IP_FLAG_RETURN |
1990 PERF_IP_FLAG_INTERRUPT;
2000 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2003 size_t processed = 0;
2006 * Packets are decoded and added to the decoder's packet queue
2007 * until the decoder packet processing callback has requested that
2008 * processing stops or there is nothing left in the buffer. Normal
2009 * operations that stop processing are a timestamp packet or a full
2010 * decoder buffer queue.
2012 ret = cs_etm_decoder__process_data_block(etmq->decoder,
2014 &etmq->buf[etmq->buf_used],
2020 etmq->offset += processed;
2021 etmq->buf_used += processed;
2022 etmq->buf_len -= processed;
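/*
 * For example, if the decoder consumed 1024 bytes of a block with 4096
 * bytes still pending, etmq->offset and etmq->buf_used each advance by
 * 1024 and etmq->buf_len drops to 3072, so the next call resumes exactly
 * where this one stopped.
 */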
2028 static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2029 struct cs_etm_traceid_queue *tidq)
2032 struct cs_etm_packet_queue *packet_queue;
2034 packet_queue = &tidq->packet_queue;
2036 /* Process each packet in this chunk */
2038 ret = cs_etm_decoder__get_packet(packet_queue,
2042 * Stop processing this chunk on
2043 * end of data or error
2048 * Since packet addresses are swapped as part of the packet
2049 * handling within the switch() statement below,
2050 * the sample flags must be set
2051 * prior to the switch() statement so that address
2052 * information is used before the packets are swapped.
2054 ret = cs_etm__set_sample_flags(etmq, tidq);
2058 switch (tidq->packet->sample_type) {
2061 * If the packet contains an instruction
2062 * range, generate instruction sequence
2065 cs_etm__sample(etmq, tidq);
2067 case CS_ETM_EXCEPTION:
2068 case CS_ETM_EXCEPTION_RET:
2070 * If an exception packet comes,
2071 * make sure the previous instruction
2072 * range packet is handled properly.
2074 cs_etm__exception(tidq);
2076 case CS_ETM_DISCONTINUITY:
2078 * Discontinuity in trace, flush
2079 * previous branch stack
2081 cs_etm__flush(etmq, tidq);
2085 * Should not receive empty packet,
2088 pr_err("CS ETM Trace: empty packet\n");
2098 static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2101 struct int_node *inode;
2102 struct cs_etm_traceid_queue *tidq;
2103 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2105 intlist__for_each_entry(inode, traceid_queues_list) {
2106 idx = (int)(intptr_t)inode->priv;
2107 tidq = etmq->traceid_queues[idx];
2109 /* Ignore return value */
2110 cs_etm__process_traceid_queue(etmq, tidq);
2113 * Generate an instruction sample with the remaining
2114 * branchstack entries.
2116 cs_etm__flush(etmq, tidq);
2120 static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
2123 struct cs_etm_traceid_queue *tidq;
2125 tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2129 /* Go through each buffer in the queue and decode them one by one */
2131 err = cs_etm__get_data_block(etmq);
2135 /* Run trace decoder until buffer consumed or end of trace */
2137 err = cs_etm__decode_data_block(etmq);
2142 * Process each packet in this chunk, nothing to do if
2143 * an error occurs other than hoping the next one will
2146 err = cs_etm__process_traceid_queue(etmq, tidq);
2148 } while (etmq->buf_len);
2151 /* Flush any remaining branch stack entries */
2152 err = cs_etm__end_block(etmq, tidq);
2158 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2162 struct auxtrace_queues *queues = &etm->queues;
2164 for (i = 0; i < queues->nr_queues; i++) {
2165 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2166 struct cs_etm_queue *etmq = queue->priv;
2167 struct cs_etm_traceid_queue *tidq;
2172 tidq = cs_etm__etmq_get_traceid_queue(etmq,
2173 CS_ETM_PER_THREAD_TRACEID);
2178 if ((tid == -1) || (tidq->tid == tid)) {
2179 cs_etm__set_pid_tid_cpu(etm, tidq);
2180 cs_etm__run_decoder(etmq);
2187 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
2190 unsigned int cs_queue_nr, queue_nr, i;
2193 struct auxtrace_queue *queue;
2194 struct cs_etm_queue *etmq;
2195 struct cs_etm_traceid_queue *tidq;
2198 * Pre-populate the heap with one entry from each queue so that we can
2199 * start processing in time order across all queues.
2201 for (i = 0; i < etm->queues.nr_queues; i++) {
2202 etmq = etm->queues.queue_array[i].priv;
2206 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2212 if (!etm->heap.heap_cnt)
2215 /* Take the entry at the top of the min heap */
2216 cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2217 queue_nr = TO_QUEUE_NR(cs_queue_nr);
2218 trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2219 queue = &etm->queues.queue_array[queue_nr];
2223 * Remove the top entry from the heap since we are about
2226 auxtrace_heap__pop(&etm->heap);
2228 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2231 * No traceID queue has been allocated for this traceID,
2232 * which means something somewhere went very wrong. No
2233 * other choice than simply exit.
2240 * Packets associated with this timestamp are already in
2241 * the etmq's traceID queue, so process them.
2243 ret = cs_etm__process_traceid_queue(etmq, tidq);
2248 * Packets for this timestamp have been processed, time to
2249 * move on to the next timestamp, fetching a new auxtrace_buffer
2253 ret = cs_etm__get_data_block(etmq);
2258 * No more auxtrace_buffers to process in this etmq, simply
2259 * move on to another entry in the auxtrace_heap.
2264 ret = cs_etm__decode_data_block(etmq);
2268 cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
2270 if (!cs_timestamp) {
2272 * Function cs_etm__decode_data_block() returns when
2273 * there are no more traces to decode in the current
2274 * auxtrace_buffer OR when a timestamp has been
2275 * encountered on any of the traceID queues. Since we
2276 * did not get a timestamp, there are no more traces to
2277 * process in this auxtrace_buffer. As such empty and
2278 * flush all traceID queues.
2280 cs_etm__clear_all_traceid_queues(etmq);
2282 /* Fetch another auxtrace_buffer for this etmq */
2287 * Add to the min heap the timestamp for packets that have
2288 * just been decoded. They will be processed and synthesized
2289 * during the next call to cs_etm__process_traceid_queue() for
2290 * this queue/traceID.
2292 cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
2293 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
2300 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2301 union perf_event *event)
2305 if (etm->timeless_decoding)
2309 * Add the tid/pid to the log so that we can get a match when
2310 * we get a contextID from the decoder.
2312 th = machine__findnew_thread(etm->machine,
2313 event->itrace_start.pid,
2314 event->itrace_start.tid);
2323 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2324 union perf_event *event)
2327 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2330 * Context switches in per-thread mode are irrelevant since perf
2331 * will start/stop tracing as the process is scheduled.
2333 if (etm->timeless_decoding)
2337 * SWITCH_IN events carry the next process to be switched out while
2338 * SWITCH_OUT events carry the process to be switched in. As such
2339 * we don't care about IN events.
2345 * Add the tid/pid to the log so that we can get a match when
2346 * we get a contextID from the decoder.
2348 th = machine__findnew_thread(etm->machine,
2349 event->context_switch.next_prev_pid,
2350 event->context_switch.next_prev_tid);
2359 static int cs_etm__process_event(struct perf_session *session,
2360 union perf_event *event,
2361 struct perf_sample *sample,
2362 struct perf_tool *tool)
2364 u64 sample_kernel_timestamp;
2365 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2366 struct cs_etm_auxtrace,
2372 if (!tool->ordered_events) {
2373 pr_err("CoreSight ETM Trace requires ordered events\n");
2377 if (sample->time && (sample->time != (u64) -1))
2378 sample_kernel_timestamp = sample->time;
2380 sample_kernel_timestamp = 0;
2383 * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
2384 * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
2385 * ETM_OPT_CTXTID is not enabled.
2387 if (etm->timeless_decoding &&
2388 event->header.type == PERF_RECORD_EXIT)
2389 return cs_etm__process_timeless_queues(etm,
2392 if (event->header.type == PERF_RECORD_ITRACE_START)
2393 return cs_etm__process_itrace_start(etm, event);
2394 else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2395 return cs_etm__process_switch_cpu_wide(etm, event);
2397 if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
2399 * Record the latest kernel timestamp available in the header
2400 * for samples so that synthesised samples occur from this point onwards.
2403 etm->latest_kernel_timestamp = sample_kernel_timestamp;
2409 static void dump_queued_data(struct cs_etm_auxtrace *etm,
2410 struct perf_record_auxtrace *event)
2412 struct auxtrace_buffer *buf;
2416 * Find all buffers with the same reference in the queues and dump them.
2416 * This is because the queues can contain multiple entries of the same
2417 * buffer that were split on aux records.
2419 for (i = 0; i < etm->queues.nr_queues; ++i)
2420 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2421 if (buf->reference == event->reference)
2422 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2425 static int cs_etm__process_auxtrace_event(struct perf_session *session,
2426 union perf_event *event,
2427 struct perf_tool *tool __maybe_unused)
2429 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2430 struct cs_etm_auxtrace,
2432 if (!etm->data_queued) {
2433 struct auxtrace_buffer *buffer;
2435 int fd = perf_data__fd(session->data);
2436 bool is_pipe = perf_data__is_pipe(session->data);
2438 int idx = event->auxtrace.idx;
2443 data_offset = lseek(fd, 0, SEEK_CUR);
2444 if (data_offset == -1)
2448 err = auxtrace_queues__add_event(&etm->queues, session,
2449 event, data_offset, &buffer);
2454 * Knowing if the trace is formatted or not requires a lookup of
2455 * the aux record so only works in non-piped mode where data is
2456 * queued in cs_etm__queue_aux_records(). Always assume
2457 * formatted in piped mode (true).
2459 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2465 if (auxtrace_buffer__get_data(buffer, fd)) {
2466 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2467 auxtrace_buffer__put_data(buffer);
2469 } else if (dump_trace)
2470 dump_queued_data(etm, &event->auxtrace);
2475 static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2477 struct evsel *evsel;
2478 struct evlist *evlist = etm->session->evlist;
2479 bool timeless_decoding = true;
2481 /* Override timeless mode with user input from --itrace=Z */
2482 if (etm->synth_opts.timeless_decoding)
2486 * Circle through the list of events and complain if we find one
2487 * with the time bit set.
2489 evlist__for_each_entry(evlist, evsel) {
2490 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2491 timeless_decoding = false;
2494 return timeless_decoding;
2497 static const char * const cs_etm_global_header_fmts[] = {
2498 [CS_HEADER_VERSION] = " Header version %llx\n",
2499 [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
2500 [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
2503 static const char * const cs_etm_priv_fmts[] = {
2504 [CS_ETM_MAGIC] = " Magic number %llx\n",
2505 [CS_ETM_CPU] = " CPU %lld\n",
2506 [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
2507 [CS_ETM_ETMCR] = " ETMCR %llx\n",
2508 [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
2509 [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
2510 [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
2513 static const char * const cs_etmv4_priv_fmts[] = {
2514 [CS_ETM_MAGIC] = " Magic number %llx\n",
2515 [CS_ETM_CPU] = " CPU %lld\n",
2516 [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
2517 [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
2518 [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
2519 [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
2520 [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
2521 [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
2522 [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
2523 [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
2526 static const char * const param_unk_fmt =
2527 " Unknown parameter [%d] %llx\n";
2528 static const char * const magic_unk_fmt =
2529 " Magic number Unknown %llx\n";
2531 static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
2533 int i = *offset, j, nr_params = 0, fmt_offset;
2536 /* check magic value */
2537 magic = val[i + CS_ETM_MAGIC];
2538 if ((magic != __perf_cs_etmv3_magic) &&
2539 (magic != __perf_cs_etmv4_magic)) {
2540 /* failure - note bad magic value */
2541 fprintf(stdout, magic_unk_fmt, magic);
2545 /* print common header block */
2546 fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
2547 fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
2549 if (magic == __perf_cs_etmv3_magic) {
2550 nr_params = CS_ETM_NR_TRC_PARAMS_V0;
2551 fmt_offset = CS_ETM_ETMCR;
2552 /* after common block, offset format index past NR_PARAMS */
2553 for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
2554 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
2555 } else if (magic == __perf_cs_etmv4_magic) {
2556 nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
2557 fmt_offset = CS_ETMV4_TRCCONFIGR;
2558 /* after common block, offset format index past NR_PARAMS */
2559 for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
2560 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
2566 static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
2568 int i = *offset, j, total_params = 0;
2571 magic = val[i + CS_ETM_MAGIC];
2572 /* total params to print is NR_PARAMS + common block size for v1 */
2573 total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
2575 if (magic == __perf_cs_etmv3_magic) {
2576 for (j = 0; j < total_params; j++, i++) {
2577 /* if newer record - could be excess params */
2578 if (j >= CS_ETM_PRIV_MAX)
2579 fprintf(stdout, param_unk_fmt, j, val[i]);
2581 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
2583 } else if (magic == __perf_cs_etmv4_magic) {
2584 for (j = 0; j < total_params; j++, i++) {
2585 /* if newer record - could be excess params */
2586 if (j >= CS_ETMV4_PRIV_MAX)
2587 fprintf(stdout, param_unk_fmt, j, val[i]);
2589 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
2592 /* failure - note bad magic value and error out */
2593 fprintf(stdout, magic_unk_fmt, magic);
2600 static void cs_etm__print_auxtrace_info(__u64 *val, int num)
2602 int i, cpu = 0, version, err;
2604 /* bail out early on bad header version */
2606 if (version > CS_HEADER_CURRENT_VERSION) {
2607 /* failure.. return */
2608 fprintf(stdout, " Unknown Header Version = %x, ", version);
2609 fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
2613 for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
2614 fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
2616 for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
2618 err = cs_etm__print_cpu_metadata_v0(val, &i);
2619 else if (version == 1)
2620 err = cs_etm__print_cpu_metadata_v1(val, &i);
2627 * Read a single cpu parameter block from the auxtrace_info priv block.
2629 * For version 1 there is a per-cpu nr_params entry. If we are handling a
2630 * version 1 file, then there may be fewer, the same, or more params
2631 * indicated by this value than the compile-time number we understand.
2633 * For a version 0 info block, there is a fixed number of params, and we
2634 * need to fill out the nr_param value in the metadata we create.
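*
* As an illustrative sketch (not an exhaustive layout description): for a
* version 0 source block, the code below copies MAGIC and CPU, synthesises
* NR_TRC_PARAMS from nr_params_v0, and copies the remaining params one slot
* higher so the output follows the version 1 layout of
* MAGIC, CPU, NR_TRC_PARAMS, <trace params...>.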
2636 static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
2637 int out_blk_size, int nr_params_v0)
2639 u64 *metadata = NULL;
2641 int nr_in_params, nr_out_params, nr_cmn_params;
2644 metadata = zalloc(sizeof(*metadata) * out_blk_size);
2648 /* read block current index & version */
2649 i = *buff_in_offset;
2650 hdr_version = buff_in[CS_HEADER_VERSION];
2653 /* read version 0 info block into a version 1 metadata block */
2654 nr_in_params = nr_params_v0;
2655 metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
2656 metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
2657 metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
2658 /* remaining block params at offset +1 from source */
2659 for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
2660 metadata[k + 1] = buff_in[i + k];
2661 /* version 0 has 2 common params */
2664 /* read version 1 info block - input and output nr_params may differ */
2665 /* version 1 has 3 common params */
2667 nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
2669 /* if input has more params than output - skip excess */
2670 nr_out_params = nr_in_params + nr_cmn_params;
2671 if (nr_out_params > out_blk_size)
2672 nr_out_params = out_blk_size;
2674 for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
2675 metadata[k] = buff_in[i + k];
2677 /* record the actual nr params we copied */
2678 metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
2681 /* adjust in offset by number of in params used */
2682 i += nr_in_params + nr_cmn_params;
2683 *buff_in_offset = i;
2688 * Puts a fragment of an auxtrace buffer into the auxtrace queues based
2689 * on the bounds of aux_event, if it matches the buffer that sits at file_offset.
2692 * Normally, whole auxtrace buffers would be added to the queue. But we
2693 * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
2694 * is reset across each buffer, so splitting the buffers up in advance has the same effect.
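*
* Roughly, as a sketch of the checks below: the PERF_RECORD_AUXTRACE event
* at file_offset is peeked, matched against the aux record's CPU/TID, and
* the aux record's [aux_offset, aux_offset + aux_size) range is only queued
* when it falls inside the auxtrace buffer's own offset/size bounds.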
2697 static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
2698 struct perf_record_aux *aux_event, struct perf_sample *sample)
2701 char buf[PERF_SAMPLE_MAX_SIZE];
2702 union perf_event *auxtrace_event_union;
2703 struct perf_record_auxtrace *auxtrace_event;
2704 union perf_event auxtrace_fragment;
2705 __u64 aux_offset, aux_size;
2709 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2710 struct cs_etm_auxtrace,
2714 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
2715 * from looping through the auxtrace index.
2717 err = perf_session__peek_event(session, file_offset, buf,
2718 PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
2721 auxtrace_event = &auxtrace_event_union->auxtrace;
2722 if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
2725 if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
2726 auxtrace_event->header.size != sz) {
2731 * In per-thread mode, CPU is set to -1, but TID will be set instead. See
2732 * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
2734 if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
2735 auxtrace_event->cpu != sample->cpu)
2738 if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
2740 * Clamp the size in snapshot mode. The buffer size is clamped in
2741 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect the actual amount of captured data.
2744 aux_size = min(aux_event->aux_size, auxtrace_event->size);
2747 * In this mode, the head also points to the end of the buffer, so aux_offset
2748 * needs to have the size subtracted so that it points to the beginning, as in normal mode.
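* For example, an aux_offset of 0x3000 with an aux_size of 0x1000 yields
* an adjusted aux_offset of 0x2000, the start of the captured data.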
2750 aux_offset = aux_event->aux_offset - aux_size;
2752 aux_size = aux_event->aux_size;
2753 aux_offset = aux_event->aux_offset;
2756 if (aux_offset >= auxtrace_event->offset &&
2757 aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
2759 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
2760 * based on the sizes of the aux event, and queue that fragment.
2762 auxtrace_fragment.auxtrace = *auxtrace_event;
2763 auxtrace_fragment.auxtrace.size = aux_size;
2764 auxtrace_fragment.auxtrace.offset = aux_offset;
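/*
 * Point file_offset at the fragment's data: skip the PERF_RECORD_AUXTRACE
 * header and the part of the buffer that precedes aux_offset.
 */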
2765 file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
2767 pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
2768 " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
2769 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
2774 idx = auxtrace_event->idx;
2775 formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
2776 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2780 /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
2784 static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2785 u64 offset __maybe_unused, void *data __maybe_unused)
2787 struct perf_sample sample;
2789 struct auxtrace_index_entry *ent;
2790 struct auxtrace_index *auxtrace_index;
2791 struct evsel *evsel;
2794 /* Don't care about any other events, we're only queuing buffers for AUX events */
2795 if (event->header.type != PERF_RECORD_AUX)
2798 if (event->header.size < sizeof(struct perf_record_aux))
2801 /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
2802 if (!event->aux.aux_size)
2806 * Parse the sample; we need the sample_id_all data that comes after the event so that the
2807 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
2809 evsel = evlist__event2evsel(session->evlist, event);
2812 ret = evsel__parse_sample(evsel, event, &sample);
2817 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
2819 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
2820 for (i = 0; i < auxtrace_index->nr; i++) {
2821 ent = &auxtrace_index->entries[i];
2822 ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
2823 ent->sz, &event->aux, &sample);
2825 * Stop the search on error or successful (0) values. Continue the search on 1 ('not found').
2834 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
2835 * don't exit with an error because it will still be possible to decode other aux records.
2837 pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
2838 " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
2842 static int cs_etm__queue_aux_records(struct perf_session *session)
2844 struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
2845 struct auxtrace_index, list);
2846 if (index && index->nr > 0)
2847 return perf_session__peek_events(session, session->header.data_offset,
2848 session->header.data_size,
2849 cs_etm__queue_aux_records_cb, NULL);
2852 * We would get here if there are no entries in the index (either no auxtrace
2853 * buffers or no index at all). Fail silently as there is the possibility of
2854 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still false at that point.
2857 * In that scenario, buffers will not be split by AUX records.
2862 int cs_etm__process_auxtrace_info(union perf_event *event,
2863 struct perf_session *session)
2865 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
2866 struct cs_etm_auxtrace *etm = NULL;
2867 struct int_node *inode;
2868 unsigned int pmu_type;
2869 int event_header_size = sizeof(struct perf_event_header);
2870 int info_header_size;
2871 int total_size = auxtrace_info->header.size;
2873 int num_cpu, trcidr_idx;
2876 u64 *ptr, *hdr = NULL;
2877 u64 **metadata = NULL;
2881 * sizeof(auxtrace_info_event::type) +
2882 * sizeof(auxtrace_info_event::reserved) == 8
2884 info_header_size = 8;
2886 if (total_size < (event_header_size + info_header_size))
2889 priv_size = total_size - event_header_size - info_header_size;
2891 /* First the global part */
2892 ptr = (u64 *) auxtrace_info->priv;
2894 /* Look for version of the header */
2895 hdr_version = ptr[0];
2896 if (hdr_version > CS_HEADER_CURRENT_VERSION) {
2897 /* print routine will print an error on bad version */
2899 cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
2903 hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
2907 /* Extract header information - see cs-etm.h for format */
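/*
 * hdr[CS_PMU_TYPE_CPUS] packs the number of CPUs in its lower 32 bits and
 * the CoreSight PMU type in its upper 32 bits, as unpacked below.
 */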
2908 for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
2910 num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
2911 pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
2915 * Create an RB tree for traceID-metadata tuples. Since the conversion
2916 * has to be made for each packet that gets decoded, optimizing access
2917 * with something better than a sequential array scan is worth doing.
2919 traceid_list = intlist__new(NULL);
2920 if (!traceid_list) {
2925 metadata = zalloc(sizeof(*metadata) * num_cpu);
2928 goto err_free_traceid_list;
2932 * The metadata is stored in the auxtrace_info section and encodes
2933 * the configuration of the ARM embedded trace macrocell which is
2934 * required by the trace decoder to properly decode the trace due
2935 * to its highly compressed nature.
2937 for (j = 0; j < num_cpu; j++) {
2938 if (ptr[i] == __perf_cs_etmv3_magic) {
2940 cs_etm__create_meta_blk(ptr, &i,
2942 CS_ETM_NR_TRC_PARAMS_V0);
2944 /* The traceID is our handle */
2945 trcidr_idx = CS_ETM_ETMTRACEIDR;
2947 } else if (ptr[i] == __perf_cs_etmv4_magic) {
2949 cs_etm__create_meta_blk(ptr, &i,
2951 CS_ETMV4_NR_TRC_PARAMS_V0);
2953 /* The traceID is our handle */
2954 trcidr_idx = CS_ETMV4_TRCTRACEIDR;
2959 goto err_free_metadata;
2962 /* Get an RB node for this CPU */
2963 inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
2965 /* Something went wrong, no need to continue */
2968 goto err_free_metadata;
2972 * The node for that CPU should not already be in use.
2973 * Back out if that's the case.
2977 goto err_free_metadata;
2979 /* All good, associate the traceID with the metadata pointer */
2980 inode->priv = metadata[j];
2984 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
2985 * CS_ETMV4_PRIV_MAX marks how many double words are in the
2986 * global metadata and in each cpu's metadata respectively.
2987 * The following tests whether the correct number of double words
2988 * (8 bytes each, hence i * 8) was present in the auxtrace info section.
2990 if (i * 8 != priv_size) {
2992 goto err_free_metadata;
2995 etm = zalloc(sizeof(*etm));
2999 goto err_free_metadata;
3002 err = auxtrace_queues__init(&etm->queues);
3006 if (session->itrace_synth_opts->set) {
3007 etm->synth_opts = *session->itrace_synth_opts;
3009 itrace_synth_opts__set_default(&etm->synth_opts,
3010 session->itrace_synth_opts->default_no_sample);
3011 etm->synth_opts.callchain = false;
3014 etm->session = session;
3015 etm->machine = &session->machines.host;
3017 etm->num_cpu = num_cpu;
3018 etm->pmu_type = pmu_type;
3019 etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
3020 etm->metadata = metadata;
3021 etm->auxtrace_type = auxtrace_info->type;
3022 etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
3024 etm->auxtrace.process_event = cs_etm__process_event;
3025 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3026 etm->auxtrace.flush_events = cs_etm__flush_events;
3027 etm->auxtrace.free_events = cs_etm__free_events;
3028 etm->auxtrace.free = cs_etm__free;
3029 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
3030 session->auxtrace = &etm->auxtrace;
3032 etm->unknown_thread = thread__new(999999999, 999999999);
3033 if (!etm->unknown_thread) {
3035 goto err_free_queues;
3039 * Initialize the list node so that at thread__zput() we avoid a
3040 * segmentation fault in list_del_init().
3042 INIT_LIST_HEAD(&etm->unknown_thread->node);
3044 err = thread__set_comm(etm->unknown_thread, "unknown", 0);
3046 goto err_delete_thread;
3048 if (thread__init_maps(etm->unknown_thread, etm->machine)) {
3050 goto err_delete_thread;
3054 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
3057 err = cs_etm__synth_events(etm, session);
3059 goto err_delete_thread;
3061 err = cs_etm__queue_aux_records(session);
3063 goto err_delete_thread;
3065 etm->data_queued = etm->queues.populated;
3067 * Print warning in pipe mode, see cs_etm__process_auxtrace_event() and
3068 * cs_etm__queue_aux_fragment() for details relating to limitations.
3070 if (!etm->data_queued)
3071 pr_warning("CS ETM warning: Coresight decode and TRBE support requires random file access.\n"
3072 "Continuing with best effort decoding in piped mode.\n\n");
3077 thread__zput(etm->unknown_thread);
3079 auxtrace_queues__free(&etm->queues);
3080 session->auxtrace = NULL;
3084 /* No need to check @metadata[j], free(NULL) is supported */
3085 for (j = 0; j < num_cpu; j++)
3086 zfree(&metadata[j]);
3088 err_free_traceid_list:
3089 intlist__delete(traceid_list);
3093 * At this point we have, at a minimum, a valid header. Dump the rest of
3094 * the info section - the print routines will error out on structural issues.
3098 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);