// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
#include "util.h"

#define MAX_TIMESTAMP (~0ULL)

/*
 * A64 instructions are always 4 bytes.
 *
 * Only A64 is supported, so this constant can be used for converting between
 * addresses and instruction counts, calculating offsets etc.
 */
#define A64_INSTR_SIZE 4
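/*
 * For example: with the exclusive end address convention used by the range
 * packets below, the range [0x4000, 0x4010) contains
 * (0x4010 - 0x4000) / A64_INSTR_SIZE = 4 instructions, and the last executed
 * instruction starts at 0x4010 - A64_INSTR_SIZE = 0x400c.
 */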
struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct thread *thread;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	const struct cs_etm_state *state;
	union perf_event *event_buf;
	unsigned int queue_nr;
	pid_t pid, tid;
	int cpu;
	u64 time;
	u64 timestamp;
	u64 offset;
	u64 period_instructions;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_);
static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int i, ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters to simply print the trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_PRINT;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!decoder)
		return;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);
}
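/*
 * Note: the throw-away decoder above runs in CS_ETM_OPERATION_PRINT mode, so
 * every packet is routed to cs_etm__packet_dump().  The per-queue decoder
 * created in cs_etm__alloc_queue() is configured with the same trace
 * parameters but with CS_ETM_OPERATION_DECODE, producing cs_etm_packet
 * structures rather than text.
 */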
static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (!etm->timeless_decoding)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
}
static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	thread__zput(etmq->thread);
	cs_etm_decoder__free(etmq->decoder);
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);
}
static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}
static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/CPU# nodes from the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
			      size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;

	if (!etmq)
		return -1;

	machine = etmq->etm->machine;
	if (address >= etmq->etm->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = etmq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = etmq->etm->unknown_thread;
	}

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);

	if (!al.map || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						unsigned int queue_nr)
{
	int i;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_queue *etmq;
	size_t szp = sizeof(struct cs_etm_packet);

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->packet = zalloc(szp);
	if (!etmq->packet)
		goto out_free;

	if (etm->synth_opts.last_branch || etm->sample_branches) {
		etmq->prev_packet = zalloc(szp);
		if (!etmq->prev_packet)
			goto out_free;
	}

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		etmq->last_branch = zalloc(sz);
		if (!etmq->last_branch)
			goto out_free;
		etmq->last_branch_rb = zalloc(sz);
		if (!etmq->last_branch_rb)
			goto out_free;
	}

	etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!etmq->event_buf)
		goto out_free;

	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->pid = -1;
	etmq->tid = -1;
	etmq->cpu = -1;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters to decode trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_DECODE;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;
	d_params.data = etmq;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	etmq->offset = 0;
	etmq->period_instructions = 0;

	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);

	return NULL;
}
static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, queue_nr);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;

	if (queue->cpu != -1)
		etmq->cpu = queue->cpu;

	etmq->tid = queue->tid;

	return 0;
}
static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}
static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs_src = etmq->last_branch_rb;
	struct branch_stack *bs_dst = etmq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps.  First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[etmq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over.  The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * etmq->last_branch_pos);
	}
}
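/*
 * Worked example: with last_branch_sz = 4, after the ring buffer has wrapped
 * once we may have last_branch_pos = 1 and nr = 4.  Step one copies
 * entries[1..3] (newest first) to the destination; step two appends
 * entries[0], the oldest remaining branch, giving last_branch_sz entries in
 * reverse chronological order.
 */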
static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
{
	etmq->last_branch_pos = 0;
	etmq->last_branch_rb->nr = 0;
}
static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
{
	/*
	 * The packet records the execution range with an exclusive end
	 * address.  A64 instructions are constant size, so the last executed
	 * instruction is A64_INSTR_SIZE before the end address.
	 * Will need to do instruction level decode for T32 instructions as
	 * they can be variable size (not yet supported).
	 */
	return packet->end_addr - A64_INSTR_SIZE;
}
static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
{
	/*
	 * Only A64 instructions are currently supported, so the instruction
	 * count can be obtained by dividing by the instruction size.
	 * Will need to do instruction level decode for T32 instructions as
	 * they can be variable size (not yet supported).
	 */
	return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
}

static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
				     u64 offset)
{
	/*
	 * Only A64 instructions are currently supported, so the instruction
	 * address can be obtained by multiplying the offset by the
	 * instruction size.  Will need to do instruction level decode for
	 * T32 instructions as they can be variable size (not yet supported).
	 */
	return packet->start_addr + offset * A64_INSTR_SIZE;
}
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs = etmq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down.  After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!etmq->last_branch_pos)
		etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	etmq->last_branch_pos -= 1;

	be       = &bs->entries[etmq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(etmq->prev_packet);
	be->to   = etmq->packet->start_addr;
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
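/*
 * Worked example: with last_branch_sz = 4, successive branches land at
 * indices 3, 2, 1, 0, then wrap to 3 again.  The entry at last_branch_pos is
 * therefore always the newest one, which is what lets
 * cs_etm__copy_last_branch_rb() emit the stack newest-first.
 */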
static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}
static int
cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		buff->len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	buff->offset = aux_buffer->offset;
	buff->len = aux_buffer->size;
	buff->buf = aux_buffer->data;

	buff->ref_timestamp = aux_buffer->reference;

	return buff->len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct auxtrace_queue *queue)
{
	struct cs_etm_queue *etmq = queue->priv;

	/* CPU-wide tracing isn't supported yet */
	if (queue->tid == -1)
		return;

	if ((!etmq->thread) && (etmq->tid != -1))
		etmq->thread = machine__find_thread(etm->machine, -1,
						    etmq->tid);

	if (etmq->thread) {
		etmq->pid = etmq->thread->pid_;
		if (queue->cpu == -1)
			etmq->cpu = etmq->thread->cpu;
	}
}
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = etmq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.insn_len = 1;
	sample.cpumode = event->header.misc;

	if (etm->synth_opts.last_branch) {
		cs_etm__copy_last_branch_rb(etmq);
		sample.branch_stack = etmq->last_branch;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(etmq);

	return ret;
}
/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch.  Generate sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = etmq->event_buf;
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.addr = etmq->packet->start_addr;
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.cpumode = PERF_RECORD_MISC_USER;

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver branch event, error %d\n",
			ret);

	return ret;
}
struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}
static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}
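/*
 * For example: if the CoreSight evsel's first id is 42, synthesized branch
 * samples carry id 1000000042 and synthesized instruction samples id
 * 1000000043, keeping them clear of the ids already allocated in the
 * session.
 */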
static int cs_etm__sample(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;
	int ret;
	u64 instrs_executed;

	instrs_executed = cs_etm__instr_count(etmq->packet);
	etmq->period_instructions += instrs_executed;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
	    etmq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq);

	if (etm->sample_instructions &&
	    etmq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/* Get number of instructions executed after the sample point */
		u64 instrs_over = etmq->period_instructions -
			etm->instructions_sample_period;

		/*
		 * Calculate the address of the sampled instruction (-1 as
		 * sample is reported as though instruction has just been
		 * executed, but PC has not advanced to next instruction)
		 */
		u64 offset = (instrs_executed - instrs_over - 1);
		u64 addr = cs_etm__instr_addr(etmq->packet, offset);

		ret = cs_etm__synth_instruction_sample(
			etmq, addr, etm->instructions_sample_period);
		if (ret)
			return ret;

		/* Carry remaining instructions into next sample period */
		etmq->period_instructions = instrs_over;
	}

	if (etm->sample_branches &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
	    etmq->prev_packet->last_instr_taken_branch) {
		ret = cs_etm__synth_branch_sample(etmq);
		if (ret)
			return ret;
	}

	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return 0;
}
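/*
 * Worked example for the sampling arithmetic above: with a sample period of
 * 1000, if period_instructions stood at 900 and a packet with 300
 * instructions arrives, the counter reaches 1200 and instrs_over is 200.
 * The sampled instruction is then at offset 300 - 200 - 1 = 99 within the
 * packet, and the 200 extra instructions are carried into the next period.
 */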
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_packet *tmp;

	if (etmq->etm->synth_opts.last_branch &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, addr,
			etmq->period_instructions);
		etmq->period_instructions = 0;

		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return err;
}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_buffer buffer;
	size_t buffer_used, processed;
	int err = 0;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		buffer_used = 0;
		memset(&buffer, 0, sizeof(buffer));
		err = cs_etm__get_trace(&buffer, etmq);
		if (err <= 0)
			return err;
		/*
		 * We cannot assume consecutive blocks in the data file are
		 * contiguous, reset the decoder to force re-sync.
		 */
		err = cs_etm_decoder__reset(etmq->decoder);
		if (err != 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			processed = 0;
			err = cs_etm_decoder__process_data_block(
				etmq->decoder,
				etmq->offset,
				&buffer.buf[buffer_used],
				buffer.len - buffer_used,
				&processed);
			if (err)
				return err;

			etmq->offset += processed;
			buffer_used += processed;

			/* Process each packet in this chunk */
			while (1) {
				err = cs_etm_decoder__get_packet(etmq->decoder,
								 etmq->packet);
				if (err <= 0)
					/*
					 * Stop processing this chunk on
					 * end of data or error
					 */
					break;

				switch (etmq->packet->sample_type) {
				case CS_ETM_RANGE:
					/*
					 * If the packet contains an instruction
					 * range, generate instruction sequence
					 * events.
					 */
					cs_etm__sample(etmq);
					break;
				case CS_ETM_TRACE_ON:
					/*
					 * Discontinuity in trace, flush
					 * previous branch stack
					 */
					cs_etm__flush(etmq);
					break;
				default:
					break;
				}
			}
		} while (buffer.len > buffer_used);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__flush(etmq);
	}

	return err;
}
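/*
 * To summarize the decode loop above: the outer while(1) fetches one
 * auxtrace buffer at a time via cs_etm__get_trace(), the do/while feeds that
 * buffer to the decoder in chunks, and the innermost loop drains the packets
 * produced for each chunk, turning CS_ETM_RANGE packets into samples and
 * treating CS_ETM_TRACE_ON as a discontinuity.
 */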
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;

		if (etmq && ((tid == -1) || (etmq->tid == tid))) {
			etmq->time = time_;
			cs_etm__set_pid_tid_cpu(etm, queue);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}
static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	if (!etm->timeless_decoding)
		return -EINVAL;

	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid,
						       sample->time);

	return 0;
}
static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t  data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}
static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Cycle through the list of events and mark the session as not
	 * timeless if any of them have the time bit set.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}
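/*
 * When no event in the session carries PERF_SAMPLE_TIME, the trace cannot be
 * correlated with the perf event stream.  Decoding is instead deferred until
 * PERF_RECORD_EXIT or the final flush, where entire queues are processed in
 * one go (see cs_etm__process_timeless_queues()).
 */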
static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0]	= "	Header version		%llx\n",
	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	%llx\n",
	[CS_ETM_SNAPSHOT]	= "	Snapshot		%llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		%llx\n",
	[CS_ETM_CPU]		= "	CPU			%lld\n",
	[CS_ETM_ETMCR]		= "	ETMCR			%llx\n",
	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		%llx\n",
	[CS_ETM_ETMCCER]	= "	ETMCCER			%llx\n",
	[CS_ETM_ETMIDR]		= "	ETMIDR			%llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		%llx\n",
	[CS_ETM_CPU]		= "	CPU			%lld\n",
	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		%llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		%llx\n",
	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			%llx\n",
	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			%llx\n",
	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			%llx\n",
	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			%llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		%llx\n",
};
static void cs_etm__print_auxtrace_info(u64 *val, int num)
{
	int i, j, cpu = 0;

	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
		if (val[i] == __perf_cs_etmv3_magic)
			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		else if (val[i] == __perf_cs_etmv4_magic)
			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		else
			/* failure.. return */
			return;
	}
}
int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu;
	int err = 0, idx = -1;
	int i, j, k;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version '0' of the header */
	if (ptr[0] != 0)
		return -EINVAL;

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		hdr[i] = ptr[i];
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				    0xffffffff);
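	/*
	 * For example: a CS_PMU_TYPE_CPUS value of 0x2a00000004 encodes
	 * PMU type 42 in the upper 32 bits and 4 CPUs worth of metadata
	 * in the lower 32 bits.
	 */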
	/*
	 * Create an RB tree for traceID-CPU# tuple.  Since the conversion has
	 * to be made for each packet that gets decoded, optimizing access in
	 * anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * The metadata is stored in the auxtrace_info section and encodes
	 * the configuration of the ARM embedded trace macrocell which is
	 * required by the trace decoder to properly decode the trace due
	 * to its highly compressed nature.
	 */
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETM_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETM_ETMTRACEIDR];
			i += CS_ETM_PRIV_MAX;
		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETMV4_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
			i += CS_ETMV4_PRIV_MAX;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, idx);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = PTR_ERR(inode);
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not be taken.
		 * Back out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}
		/* All good, associate the traceID with the CPU# */
		inode->priv = &metadata[j][CS_ETM_CPU];
	}

	/*
	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
	 * global metadata, and each cpu's metadata respectively.
	 * The following tests if the correct number of double words was
	 * present in the auxtrace info section.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	session->auxtrace = &etm->auxtrace;

	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread)
		goto err_free_queues;

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_map_groups(etm->unknown_thread, etm->machine))
		goto err_delete_thread;

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check @metadata[j], free(NULL) is supported */
	for (j = 0; j < num_cpu; j++)
		zfree(&metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);

	return err;
}