perf cs-etm: Move packet queue out of decoder structure
tools/perf/util/cs-etm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
#include "util.h"

#define MAX_TIMESTAMP (~0ULL)

struct cs_etm_auxtrace {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
        struct auxtrace_heap heap;
        struct itrace_synth_opts synth_opts;
        struct perf_session *session;
        struct machine *machine;
        struct thread *unknown_thread;

        u8 timeless_decoding;
        u8 snapshot_mode;
        u8 data_queued;
        u8 sample_branches;
        u8 sample_instructions;

        int num_cpu;
        u32 auxtrace_type;
        u64 branches_sample_type;
        u64 branches_id;
        u64 instructions_sample_type;
        u64 instructions_sample_period;
        u64 instructions_id;
        u64 **metadata;
        u64 kernel_start;
        unsigned int pmu_type;
};

struct cs_etm_queue {
        struct cs_etm_auxtrace *etm;
        struct thread *thread;
        struct cs_etm_decoder *decoder;
        struct auxtrace_buffer *buffer;
        union perf_event *event_buf;
        unsigned int queue_nr;
        pid_t pid, tid;
        int cpu;
        u64 offset;
        u64 period_instructions;
        struct branch_stack *last_branch;
        struct branch_stack *last_branch_rb;
        size_t last_branch_pos;
        struct cs_etm_packet *prev_packet;
        struct cs_etm_packet *packet;
        const unsigned char *buf;
        size_t buf_len, buf_used;
        struct cs_etm_packet_queue packet_queue;
};

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
                                           pid_t tid);

/* A PTM's ETMIDR [11:8] is set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
        etmidr &= ETMIDR_PTM_VERSION;

        if (etmidr == ETMIDR_PTM_VERSION)
                return CS_ETM_PROTO_PTM;

        return CS_ETM_PROTO_ETMV3;
}

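/*
 * Retrieve the CS_ETM_MAGIC value from the metadata associated with
 * @trace_chan_id, identifying the trace protocol for that channel.
 */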
static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
        struct int_node *inode;
        u64 *metadata;

        inode = intlist__find(traceid_list, trace_chan_id);
        if (!inode)
                return -EINVAL;

        metadata = inode->priv;
        *magic = metadata[CS_ETM_MAGIC];
        return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
        struct int_node *inode;
        u64 *metadata;

        inode = intlist__find(traceid_list, trace_chan_id);
        if (!inode)
                return -EINVAL;

        metadata = inode->priv;
        *cpu = (int)metadata[CS_ETM_CPU];
        return 0;
}

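/*
 * Empty the packet queue and mark every field of each buffered packet
 * as invalid, so no stale state leaks into freshly decoded trace data.
 */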
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
        int i;

        queue->head = 0;
        queue->tail = 0;
        queue->packet_count = 0;
        for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
                queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
                queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
                queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
                queue->packet_buffer[i].instr_count = 0;
                queue->packet_buffer[i].last_instr_taken_branch = false;
                queue->packet_buffer[i].last_instr_size = 0;
                queue->packet_buffer[i].last_instr_type = 0;
                queue->packet_buffer[i].last_instr_subtype = 0;
                queue->packet_buffer[i].last_instr_cond = 0;
                queue->packet_buffer[i].flags = 0;
                queue->packet_buffer[i].exception_number = UINT32_MAX;
                queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
                queue->packet_buffer[i].cpu = INT_MIN;
        }
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq)
{
        return &etmq->packet_queue;
}

static void cs_etm__packet_dump(const char *pkt_string)
{
        const char *color = PERF_COLOR_BLUE;
        int len = strlen(pkt_string);

        if (len && (pkt_string[len-1] == '\n'))
                color_fprintf(stdout, color, "  %s", pkt_string);
        else
                color_fprintf(stdout, color, "  %s\n", pkt_string);

        fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
                                          struct cs_etm_auxtrace *etm, int idx,
                                          u32 etmidr)
{
        u64 **metadata = etm->metadata;

        t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
        t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
        t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
                                          struct cs_etm_auxtrace *etm, int idx)
{
        u64 **metadata = etm->metadata;

        t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
        t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
        t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
        t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
        t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
        t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
        t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
                                     struct cs_etm_auxtrace *etm)
{
        int i;
        u32 etmidr;
        u64 architecture;

        for (i = 0; i < etm->num_cpu; i++) {
                architecture = etm->metadata[i][CS_ETM_MAGIC];

                switch (architecture) {
                case __perf_cs_etmv3_magic:
                        etmidr = etm->metadata[i][CS_ETM_ETMIDR];
                        cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
                        break;
                case __perf_cs_etmv4_magic:
                        cs_etm__set_trace_param_etmv4(t_params, etm, i);
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

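/*
 * Fill in the decoder parameters common to both operation modes:
 * PRINT (dump raw packets) and DECODE (generate samples).
 */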
static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
                                       struct cs_etm_queue *etmq,
                                       enum cs_etm_decoder_operation mode)
{
        int ret = -EINVAL;

        if (!(mode < CS_ETM_OPERATION_MAX))
                goto out;

        d_params->packet_printer = cs_etm__packet_dump;
        d_params->operation = mode;
        d_params->data = etmq;
        d_params->formatted = true;
        d_params->fsyncs = false;
        d_params->hsyncs = false;
        d_params->frame_aligned = true;

        ret = 0;
out:
        return ret;
}

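/*
 * Dump the raw trace data in an auxtrace buffer, using a temporary
 * decoder configured to simply print the packets it finds.
 */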
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
                               struct auxtrace_buffer *buffer)
{
        int ret;
        const char *color = PERF_COLOR_BLUE;
        struct cs_etm_decoder_params d_params;
        struct cs_etm_trace_params *t_params;
        struct cs_etm_decoder *decoder;
        size_t buffer_used = 0;

        fprintf(stdout, "\n");
        color_fprintf(stdout, color,
                     ". ... CoreSight ETM Trace data: size %zu bytes\n",
                     buffer->size);

        /* Use metadata to fill in trace parameters for trace decoder */
        t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

        if (!t_params)
                return;

        if (cs_etm__init_trace_params(t_params, etm))
                goto out_free;

        /* Set decoder parameters to simply print the trace packets */
        if (cs_etm__init_decoder_params(&d_params, NULL,
                                        CS_ETM_OPERATION_PRINT))
                goto out_free;

        decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

        if (!decoder)
                goto out_free;
        do {
                size_t consumed;

                ret = cs_etm_decoder__process_data_block(
                                decoder, buffer->offset,
                                &((u8 *)buffer->data)[buffer_used],
                                buffer->size - buffer_used, &consumed);
                if (ret)
                        break;

                buffer_used += consumed;
        } while (buffer_used < buffer->size);

        cs_etm_decoder__free(decoder);

out_free:
        zfree(&t_params);
}

static int cs_etm__flush_events(struct perf_session *session,
                                struct perf_tool *tool)
{
        int ret;
        struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        if (dump_trace)
                return 0;

        if (!tool->ordered_events)
                return -EINVAL;

        if (!etm->timeless_decoding)
                return -EINVAL;

        ret = cs_etm__update_queues(etm);

        if (ret < 0)
                return ret;

        return cs_etm__process_timeless_queues(etm, -1);
}

static void cs_etm__free_queue(void *priv)
{
        struct cs_etm_queue *etmq = priv;

        if (!etmq)
                return;

        thread__zput(etmq->thread);
        cs_etm_decoder__free(etmq->decoder);
        zfree(&etmq->event_buf);
        zfree(&etmq->last_branch);
        zfree(&etmq->last_branch_rb);
        zfree(&etmq->prev_packet);
        zfree(&etmq->packet);
        free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
        unsigned int i;
        struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        struct auxtrace_queues *queues = &aux->queues;

        for (i = 0; i < queues->nr_queues; i++) {
                cs_etm__free_queue(queues->queue_array[i].priv);
                queues->queue_array[i].priv = NULL;
        }

        auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
        int i;
        struct int_node *inode, *tmp;
        struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        cs_etm__free_events(session);
        session->auxtrace = NULL;

        /* First remove all traceID/metadata nodes from the RB tree */
        intlist__for_each_entry_safe(inode, tmp, traceid_list)
                intlist__remove(traceid_list, inode);
        /* Then the RB tree itself */
        intlist__delete(traceid_list);

        for (i = 0; i < aux->num_cpu; i++)
                zfree(&aux->metadata[i]);

        thread__zput(aux->unknown_thread);
        zfree(&aux->metadata);
        zfree(&aux);
}

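/*
 * Derive the cpumode of an address from the machine's kernel start
 * address, for both host and guest traces.
 */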
static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
        struct machine *machine;

        machine = etmq->etm->machine;

        if (address >= etmq->etm->kernel_start) {
                if (machine__is_host(machine))
                        return PERF_RECORD_MISC_KERNEL;
                else
                        return PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (machine__is_host(machine))
                        return PERF_RECORD_MISC_USER;
                else if (perf_guest)
                        return PERF_RECORD_MISC_GUEST_USER;
                else
                        return PERF_RECORD_MISC_HYPERVISOR;
        }
}

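/*
 * Memory access callback for the decoder library: read up to @size
 * bytes of instruction memory at @address from the DSO mapped there
 * in the traced thread.  Returns the number of bytes read, 0 on
 * failure.
 */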
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
                              size_t size, u8 *buffer)
{
        u8  cpumode;
        u64 offset;
        int len;
        struct   thread *thread;
        struct   machine *machine;
        struct   addr_location al;

        if (!etmq)
                return 0;

        machine = etmq->etm->machine;
        cpumode = cs_etm__cpu_mode(etmq, address);

        thread = etmq->thread;
        if (!thread) {
                if (cpumode != PERF_RECORD_MISC_KERNEL)
                        return 0;
                thread = etmq->etm->unknown_thread;
        }

        if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
                return 0;

        if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
            dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
                return 0;

        offset = al.map->map_ip(al.map, address);

        map__load(al.map);

        len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

        if (len <= 0)
                return 0;

        return len;
}

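/*
 * Allocate a queue element, along with its packet buffers, optional
 * last branch ring buffers and a decoder configured from the per-CPU
 * metadata.
 */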
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
        struct cs_etm_decoder_params d_params;
        struct cs_etm_trace_params  *t_params = NULL;
        struct cs_etm_queue *etmq;
        size_t szp = sizeof(struct cs_etm_packet);

        etmq = zalloc(sizeof(*etmq));
        if (!etmq)
                return NULL;

        etmq->packet = zalloc(szp);
        if (!etmq->packet)
                goto out_free;

        etmq->prev_packet = zalloc(szp);
        if (!etmq->prev_packet)
                goto out_free;

        if (etm->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);

                sz += etm->synth_opts.last_branch_sz *
                      sizeof(struct branch_entry);
                etmq->last_branch = zalloc(sz);
                if (!etmq->last_branch)
                        goto out_free;
                etmq->last_branch_rb = zalloc(sz);
                if (!etmq->last_branch_rb)
                        goto out_free;
        }

        etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!etmq->event_buf)
                goto out_free;

        /* Use metadata to fill in trace parameters for trace decoder */
        t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

        if (!t_params)
                goto out_free;

        if (cs_etm__init_trace_params(t_params, etm))
                goto out_free;

        /* Set decoder parameters to decode trace packets */
        if (cs_etm__init_decoder_params(&d_params, etmq,
                                        CS_ETM_OPERATION_DECODE))
                goto out_free;

        etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

        if (!etmq->decoder)
                goto out_free;

        /*
         * Register a function to handle all memory accesses required by
         * the trace decoder library.
         */
        if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
                                              0x0L, ((u64) -1L),
                                              cs_etm__mem_access))
                goto out_free_decoder;

        zfree(&t_params);
        return etmq;

out_free_decoder:
        cs_etm_decoder__free(etmq->decoder);
out_free:
        zfree(&t_params);
        zfree(&etmq->event_buf);
        zfree(&etmq->last_branch);
        zfree(&etmq->last_branch_rb);
        zfree(&etmq->prev_packet);
        zfree(&etmq->packet);
        free(etmq);

        return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
                               struct auxtrace_queue *queue,
                               unsigned int queue_nr)
{
        int ret = 0;
        struct cs_etm_queue *etmq = queue->priv;

        if (list_empty(&queue->head) || etmq)
                goto out;

        etmq = cs_etm__alloc_queue(etm);

        if (!etmq) {
                ret = -ENOMEM;
                goto out;
        }

        queue->priv = etmq;
        etmq->etm = etm;
        etmq->queue_nr = queue_nr;
        etmq->cpu = queue->cpu;
        etmq->tid = queue->tid;
        etmq->pid = -1;
        etmq->offset = 0;
        etmq->period_instructions = 0;
        cs_etm__clear_packet_queue(&etmq->packet_queue);

out:
        return ret;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
        unsigned int i;
        int ret;

        if (!etm->kernel_start)
                etm->kernel_start = machine__kernel_start(etm->machine);

        for (i = 0; i < etm->queues.nr_queues; i++) {
                ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
        if (etm->queues.new_data) {
                etm->queues.new_data = false;
                return cs_etm__setup_queues(etm);
        }

        return 0;
}

static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
{
        struct branch_stack *bs_src = etmq->last_branch_rb;
        struct branch_stack *bs_dst = etmq->last_branch;
        size_t nr = 0;

        /*
         * Set the number of records before early exit: ->nr is used to
         * determine how many branches to copy from ->entries.
         */
        bs_dst->nr = bs_src->nr;

        /*
         * Early exit when there is nothing to copy.
         */
        if (!bs_src->nr)
                return;

        /*
         * As bs_src->entries is a circular buffer, we need to copy from it in
         * two steps.  First, copy the branches from the most recently inserted
         * branch ->last_branch_pos until the end of bs_src->entries buffer.
         */
        nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[etmq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        /*
         * If we wrapped around at least once, the branches from the beginning
         * of the bs_src->entries buffer and until the ->last_branch_pos element
         * are older valid branches: copy them over.  The total number of
         * branches copied over will be equal to the number of branches asked by
         * the user in last_branch_sz.
         */
        if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
                memcpy(&bs_dst->entries[nr],
                       &bs_src->entries[0],
                       sizeof(struct branch_entry) * etmq->last_branch_pos);
        }
}

static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
{
        etmq->last_branch_pos = 0;
        etmq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
                                         u64 addr)
{
        u8 instrBytes[2];

        cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes);
        /*
         * T32 instruction size is indicated by bits[15:11] of the first
         * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
         * denote a 32-bit instruction.
         */
        return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
        /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
        if (packet->sample_type == CS_ETM_DISCONTINUITY)
                return 0;

        return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
        /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
        if (packet->sample_type == CS_ETM_DISCONTINUITY)
                return 0;

        return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
                                     const struct cs_etm_packet *packet,
                                     u64 offset)
{
        if (packet->isa == CS_ETM_ISA_T32) {
                u64 addr = packet->start_addr;

                while (offset > 0) {
                        addr += cs_etm__t32_instr_size(etmq, addr);
                        offset--;
                }
                return addr;
        }

        /* Assume a 4 byte instruction size (A32/A64) */
        return packet->start_addr + offset * 4;
}

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
{
        struct branch_stack *bs = etmq->last_branch_rb;
        struct branch_entry *be;

        /*
         * The branches are recorded in a circular buffer in reverse
         * chronological order: we start recording from the last element of the
         * buffer down.  After writing the first element of the stack, move the
         * insert position back to the end of the buffer.
         */
        if (!etmq->last_branch_pos)
                etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

        etmq->last_branch_pos -= 1;

        be       = &bs->entries[etmq->last_branch_pos];
        be->from = cs_etm__last_executed_instr(etmq->prev_packet);
        be->to   = cs_etm__first_executed_instr(etmq->packet);
        /* No support for mispredict */
        be->flags.mispred = 0;
        be->flags.predicted = 1;

        /*
         * Increment bs->nr until reaching the number of last branches asked by
         * the user on the command line.
         */
        if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
                bs->nr += 1;
}

static int cs_etm__inject_event(union perf_event *event,
                               struct perf_sample *sample, u64 type)
{
        event->header.size = perf_event__sample_event_size(sample, type, 0);
        return perf_event__synthesize_sample(event, type, 0, sample);
}

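/*
 * Fetch the next auxtrace buffer for this queue, loading its data from
 * the perf.data file if required, and drop the previous buffer.
 * Returns the size of the new buffer, or 0 when there is no more data.
 */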
static int cs_etm__get_trace(struct cs_etm_queue *etmq)
{
        struct auxtrace_buffer *aux_buffer = etmq->buffer;
        struct auxtrace_buffer *old_buffer = aux_buffer;
        struct auxtrace_queue *queue;

        queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

        aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

        /* If no more data, drop the previous auxtrace_buffer and return */
        if (!aux_buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                etmq->buf_len = 0;
                return 0;
        }

        etmq->buffer = aux_buffer;

        /* If the aux_buffer doesn't have data associated, try to load it */
        if (!aux_buffer->data) {
                /* get the file desc associated with the perf data file */
                int fd = perf_data__fd(etmq->etm->session->data);

                aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
                if (!aux_buffer->data)
                        return -ENOMEM;
        }

        /* If valid, drop the previous buffer */
        if (old_buffer)
                auxtrace_buffer__drop_data(old_buffer);

        etmq->buf_used = 0;
        etmq->buf_len = aux_buffer->size;
        etmq->buf = aux_buffer->data;

        return etmq->buf_len;
}

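/*
 * Resolve the thread for this queue from its tid, then propagate the
 * pid and (if unset) the cpu from it.
 */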
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
                                    struct auxtrace_queue *queue)
{
        struct cs_etm_queue *etmq = queue->priv;

        /* CPU-wide tracing isn't supported yet */
        if (queue->tid == -1)
                return;

        if ((!etmq->thread) && (etmq->tid != -1))
                etmq->thread = machine__find_thread(etm->machine, -1,
                                                    etmq->tid);

        if (etmq->thread) {
                etmq->pid = etmq->thread->pid_;
                if (queue->cpu == -1)
                        etmq->cpu = etmq->thread->cpu;
        }
}

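/*
 * Synthesize a PERF_RECORD_SAMPLE instruction event at @addr covering
 * @period instructions, and deliver it to the session.
 */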
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
                                            u64 addr, u64 period)
{
        int ret = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        union perf_event *event = etmq->event_buf;
        struct perf_sample sample = {.ip = 0,};

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
        event->sample.header.size = sizeof(struct perf_event_header);

        sample.ip = addr;
        sample.pid = etmq->pid;
        sample.tid = etmq->tid;
        sample.id = etmq->etm->instructions_id;
        sample.stream_id = etmq->etm->instructions_id;
        sample.period = period;
        sample.cpu = etmq->packet->cpu;
        sample.flags = etmq->prev_packet->flags;
        sample.insn_len = 1;
        sample.cpumode = event->sample.header.misc;

        if (etm->synth_opts.last_branch) {
                cs_etm__copy_last_branch_rb(etmq);
                sample.branch_stack = etmq->last_branch;
        }

        if (etm->synth_opts.inject) {
                ret = cs_etm__inject_event(event, &sample,
                                           etm->instructions_sample_type);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(etm->session, event, &sample);

        if (ret)
                pr_err(
                        "CS ETM Trace: failed to deliver instruction event, error %d\n",
                        ret);

        if (etm->synth_opts.last_branch)
                cs_etm__reset_last_branch_rb(etmq);

        return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch. Generate a sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
{
        int ret = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct perf_sample sample = {.ip = 0,};
        union perf_event *event = etmq->event_buf;
        struct dummy_branch_stack {
                u64                     nr;
                struct branch_entry     entries;
        } dummy_bs;
        u64 ip;

        ip = cs_etm__last_executed_instr(etmq->prev_packet);

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
        event->sample.header.size = sizeof(struct perf_event_header);

        sample.ip = ip;
        sample.pid = etmq->pid;
        sample.tid = etmq->tid;
        sample.addr = cs_etm__first_executed_instr(etmq->packet);
        sample.id = etmq->etm->branches_id;
        sample.stream_id = etmq->etm->branches_id;
        sample.period = 1;
        sample.cpu = etmq->packet->cpu;
        sample.flags = etmq->prev_packet->flags;
        sample.cpumode = event->sample.header.misc;

        /*
         * perf report cannot handle events without a branch stack
         */
        if (etm->synth_opts.last_branch) {
                dummy_bs = (struct dummy_branch_stack){
                        .nr = 1,
                        .entries = {
                                .from = sample.ip,
                                .to = sample.addr,
                        },
                };
                sample.branch_stack = (struct branch_stack *)&dummy_bs;
        }

        if (etm->synth_opts.inject) {
                ret = cs_etm__inject_event(event, &sample,
                                           etm->branches_sample_type);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(etm->session, event, &sample);

        if (ret)
                pr_err(
                "CS ETM Trace: failed to deliver branch event, error %d\n",
                ret);

        return ret;
}

struct cs_etm_synth {
        struct perf_tool dummy_tool;
        struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine __maybe_unused)
{
        struct cs_etm_synth *cs_etm_synth =
                      container_of(tool, struct cs_etm_synth, dummy_tool);

        return perf_session__deliver_synth_event(cs_etm_synth->session,
                                                 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
                               struct perf_event_attr *attr, u64 id)
{
        struct cs_etm_synth cs_etm_synth;

        memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
        cs_etm_synth.session = session;

        return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
                                           &id, cs_etm__event_synth);
}

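/*
 * Create the synthetic branch and/or instruction events requested on
 * the command line, deriving their attributes from the evsel that
 * carries the CoreSight trace data.
 */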
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
                                struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        bool found = false;
        u64 id;
        int err;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == etm->pmu_type) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_debug("No selected events with CoreSight Trace data\n");
                return 0;
        }

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.size = sizeof(struct perf_event_attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_PERIOD;
        if (etm->timeless_decoding)
                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
        else
                attr.sample_type |= PERF_SAMPLE_TIME;

        attr.exclude_user = evsel->attr.exclude_user;
        attr.exclude_kernel = evsel->attr.exclude_kernel;
        attr.exclude_hv = evsel->attr.exclude_hv;
        attr.exclude_host = evsel->attr.exclude_host;
        attr.exclude_guest = evsel->attr.exclude_guest;
        attr.sample_id_all = evsel->attr.sample_id_all;
        attr.read_format = evsel->attr.read_format;

        /* create new id val to be a fixed offset from evsel id */
        id = evsel->id[0] + 1000000000;

        if (!id)
                id = 1;

        if (etm->synth_opts.branches) {
                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
                etm->sample_branches = true;
                etm->branches_sample_type = attr.sample_type;
                etm->branches_id = id;
                id += 1;
                attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
        }

        if (etm->synth_opts.last_branch)
                attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

        if (etm->synth_opts.instructions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                attr.sample_period = etm->synth_opts.period;
                etm->instructions_sample_period = attr.sample_period;
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
                etm->sample_instructions = true;
                etm->instructions_sample_type = attr.sample_type;
                etm->instructions_id = id;
                id += 1;
        }

        return 0;
}

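/*
 * Process the packet in etmq->packet: update the last branch ring
 * buffer, emit periodic instruction samples and branch samples as
 * configured, then swap PACKET with PREV_PACKET.
 */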
static int cs_etm__sample(struct cs_etm_queue *etmq)
{
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct cs_etm_packet *tmp;
        int ret;
        u64 instrs_executed = etmq->packet->instr_count;

        etmq->period_instructions += instrs_executed;

        /*
         * Record a branch when the last instruction in
         * PREV_PACKET is a branch.
         */
        if (etm->synth_opts.last_branch &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE &&
            etmq->prev_packet->last_instr_taken_branch)
                cs_etm__update_last_branch_rb(etmq);

        if (etm->sample_instructions &&
            etmq->period_instructions >= etm->instructions_sample_period) {
                /*
                 * Emit instruction sample periodically
                 * TODO: allow period to be defined in cycles and clock time
                 */

                /* Get number of instructions executed after the sample point */
                u64 instrs_over = etmq->period_instructions -
                        etm->instructions_sample_period;

                /*
                 * Calculate the address of the sampled instruction (-1 as
                 * sample is reported as though instruction has just been
                 * executed, but PC has not advanced to next instruction)
                 */
                u64 offset = (instrs_executed - instrs_over - 1);
                u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset);

                ret = cs_etm__synth_instruction_sample(
                        etmq, addr, etm->instructions_sample_period);
                if (ret)
                        return ret;

                /* Carry remaining instructions into next sample period */
                etmq->period_instructions = instrs_over;
        }

        if (etm->sample_branches) {
                bool generate_sample = false;

                /* Generate sample for tracing on packet */
                if (etmq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
                        generate_sample = true;

                /* Generate sample for branch taken packet */
                if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
                    etmq->prev_packet->last_instr_taken_branch)
                        generate_sample = true;

                if (generate_sample) {
                        ret = cs_etm__synth_branch_sample(etmq);
                        if (ret)
                                return ret;
                }
        }

        if (etm->sample_branches || etm->synth_opts.last_branch) {
                /*
                 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
                 * the next incoming packet.
                 */
                tmp = etmq->packet;
                etmq->packet = etmq->prev_packet;
                etmq->prev_packet = tmp;
        }

        return 0;
}

static int cs_etm__exception(struct cs_etm_queue *etmq)
{
        /*
         * When an exception packet is inserted, force
         * 'prev_packet->last_instr_taken_branch' to true, whether or not the
         * last instruction in the previous range packet was a taken branch.
         * This ensures a branch sample is generated for the instruction range
         * executed before the exception is trapped to the kernel, or before
         * the exception returns.
         *
         * The exception packet includes dummy address values, so don't
         * swap PACKET with PREV_PACKET.  This keeps PREV_PACKET useful
         * for generating instruction and branch samples.
         */
        if (etmq->prev_packet->sample_type == CS_ETM_RANGE)
                etmq->prev_packet->last_instr_taken_branch = true;

        return 0;
}

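/*
 * Flush the samples owed for the current PREV_PACKET, typically on a
 * discontinuity or at the end of the trace.
 */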
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
        int err = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct cs_etm_packet *tmp;

        /* Handle start tracing packet */
        if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
                goto swap_packet;

        if (etmq->etm->synth_opts.last_branch &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE) {
                /*
                 * Generate a last branch event for the branches left in the
                 * circular buffer at the end of the trace.
                 *
                 * Use the address of the end of the last reported execution
                 * range
                 */
                u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

                err = cs_etm__synth_instruction_sample(
                        etmq, addr,
                        etmq->period_instructions);
                if (err)
                        return err;

                etmq->period_instructions = 0;
        }

        if (etm->sample_branches &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE) {
                err = cs_etm__synth_branch_sample(etmq);
                if (err)
                        return err;
        }

swap_packet:
        if (etm->sample_branches || etm->synth_opts.last_branch) {
                /*
                 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
                 * the next incoming packet.
                 */
                tmp = etmq->packet;
                etmq->packet = etmq->prev_packet;
                etmq->prev_packet = tmp;
        }

        return err;
}

static int cs_etm__end_block(struct cs_etm_queue *etmq)
{
        int err;

        /*
         * No new packet is coming and 'etmq->packet' still contains the stale
         * packet left over from the previous packet swap, so skip generating
         * a branch sample for it.
         *
         * Instead, only flush the branch stack and generate a last branch
         * event for the branches left in the circular buffer at the end of
         * the trace.
         */
        if (etmq->etm->synth_opts.last_branch &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE) {
                /*
                 * Use the address of the end of the last reported execution
                 * range.
                 */
                u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

                err = cs_etm__synth_instruction_sample(
                        etmq, addr,
                        etmq->period_instructions);
                if (err)
                        return err;

                etmq->period_instructions = 0;
        }

        return 0;
}

/*
 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
 *                         if need be.
 * Returns:     < 0     if error
 *              = 0     if no more auxtrace_buffer to read
 *              > 0     if the current buffer isn't empty yet
 */
static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
{
        int ret;

        if (!etmq->buf_len) {
                ret = cs_etm__get_trace(etmq);
                if (ret <= 0)
                        return ret;
                /*
                 * We cannot assume consecutive blocks in the data file
                 * are contiguous, reset the decoder to force re-sync.
                 */
                ret = cs_etm_decoder__reset(etmq->decoder);
                if (ret)
                        return ret;
        }

        return etmq->buf_len;
}

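/*
 * Read back the instruction ending at @end_addr and check whether it
 * is an SVC, using the encodings given in the Arm architecture
 * reference manual for each ISA.
 */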
static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
                                 struct cs_etm_packet *packet,
                                 u64 end_addr)
{
        u16 instr16;
        u32 instr32;
        u64 addr;

        switch (packet->isa) {
        case CS_ETM_ISA_T32:
                /*
                 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
                 *
                 *  b'15         b'8
                 * +-----------------+--------+
                 * | 1 1 0 1 1 1 1 1 |  imm8  |
                 * +-----------------+--------+
                 *
                 * According to the specification, SVC is only defined as a
                 * 16-bit T32 instruction and has no 32-bit encoding, so only
                 * read 2 bytes as the instruction size for T32.
                 */
                addr = end_addr - 2;
                cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
                if ((instr16 & 0xFF00) == 0xDF00)
                        return true;

                break;
        case CS_ETM_ISA_A32:
                /*
                 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
                 *
                 *  b'31 b'28 b'27 b'24
                 * +---------+---------+-------------------------+
                 * |  !1111  | 1 1 1 1 |        imm24            |
                 * +---------+---------+-------------------------+
                 */
                addr = end_addr - 4;
                cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
                if ((instr32 & 0x0F000000) == 0x0F000000 &&
                    (instr32 & 0xF0000000) != 0xF0000000)
                        return true;

                break;
        case CS_ETM_ISA_A64:
                /*
                 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
                 *
                 *  b'31               b'21           b'4     b'0
                 * +-----------------------+---------+-----------+
                 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
                 * +-----------------------+---------+-----------+
                 */
                addr = end_addr - 4;
                cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
                if ((instr32 & 0xFFE0001F) == 0xd4000001)
                        return true;

                break;
        case CS_ETM_ISA_UNKNOWN:
        default:
                break;
        }

        return false;
}

static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
{
        struct cs_etm_packet *packet = etmq->packet;
        struct cs_etm_packet *prev_packet = etmq->prev_packet;

        if (magic == __perf_cs_etmv3_magic)
                if (packet->exception_number == CS_ETMV3_EXC_SVC)
                        return true;

        /*
         * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
         * HVC cases; need to check if it's SVC instruction based on
         * packet address.
         */
        if (magic == __perf_cs_etmv4_magic) {
                if (packet->exception_number == CS_ETMV4_EXC_CALL &&
                    cs_etm__is_svc_instr(etmq, prev_packet,
                                         prev_packet->end_addr))
                        return true;
        }

        return false;
}

static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
{
        struct cs_etm_packet *packet = etmq->packet;

        if (magic == __perf_cs_etmv3_magic)
                if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
                    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
                    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
                    packet->exception_number == CS_ETMV3_EXC_IRQ ||
                    packet->exception_number == CS_ETMV3_EXC_FIQ)
                        return true;

        if (magic == __perf_cs_etmv4_magic)
                if (packet->exception_number == CS_ETMV4_EXC_RESET ||
                    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
                    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
                    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
                    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
                    packet->exception_number == CS_ETMV4_EXC_IRQ ||
                    packet->exception_number == CS_ETMV4_EXC_FIQ)
                        return true;

        return false;
}

static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
{
        struct cs_etm_packet *packet = etmq->packet;
        struct cs_etm_packet *prev_packet = etmq->prev_packet;

        if (magic == __perf_cs_etmv3_magic)
                if (packet->exception_number == CS_ETMV3_EXC_SMC ||
                    packet->exception_number == CS_ETMV3_EXC_HYP ||
                    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
                    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
                    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
                    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
                    packet->exception_number == CS_ETMV3_EXC_GENERIC)
                        return true;

        if (magic == __perf_cs_etmv4_magic) {
                if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
                    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
                    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
                    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
                        return true;

                /*
                 * For CS_ETMV4_EXC_CALL, instructions other than SVC
                 * (SMC, HVC) are taken as sync exceptions.
                 */
                if (packet->exception_number == CS_ETMV4_EXC_CALL &&
                    !cs_etm__is_svc_instr(etmq, prev_packet,
                                          prev_packet->end_addr))
                        return true;

                /*
                 * ETMv4 has 5 bits for the exception number; if the number
                 * is in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
                 * it is an implementation defined exception.
                 *
                 * For this case, simply take it as a sync exception.
                 */
                if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
                    packet->exception_number <= CS_ETMV4_EXC_END)
                        return true;
        }

        return false;
}

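/*
 * Map the decoded packet (and, where needed, the previous packet) to
 * the perf sample flags describing the type of branch taken.
 */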
1370 static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
1371 {
1372         struct cs_etm_packet *packet = etmq->packet;
1373         struct cs_etm_packet *prev_packet = etmq->prev_packet;
1374         u64 magic;
1375         int ret;
1376
1377         switch (packet->sample_type) {
1378         case CS_ETM_RANGE:
1379                 /*
1380                  * Immediate branch instruction without neither link nor
1381                  * return flag, it's normal branch instruction within
1382                  * the function.
1383                  */
1384                 if (packet->last_instr_type == OCSD_INSTR_BR &&
1385                     packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
1386                         packet->flags = PERF_IP_FLAG_BRANCH;
1387
1388                         if (packet->last_instr_cond)
1389                                 packet->flags |= PERF_IP_FLAG_CONDITIONAL;
1390                 }
1391
1392                 /*
1393                  * Immediate branch instruction with link (e.g. BL), this is
1394                  * branch instruction for function call.
1395                  */
1396                 if (packet->last_instr_type == OCSD_INSTR_BR &&
1397                     packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1398                         packet->flags = PERF_IP_FLAG_BRANCH |
1399                                         PERF_IP_FLAG_CALL;
1400
1401                 /*
1402                  * Indirect branch instruction with link (e.g. BLR), this is
1403                  * branch instruction for function call.
1404                  */
1405                 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1406                     packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1407                         packet->flags = PERF_IP_FLAG_BRANCH |
1408                                         PERF_IP_FLAG_CALL;
1409
1410                 /*
1411                  * Indirect branch instruction with subtype of
1412                  * OCSD_S_INSTR_V7_IMPLIED_RET, this is explicit hint for
1413                  * function return for A32/T32.
1414                  */
1415                 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1416                     packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
1417                         packet->flags = PERF_IP_FLAG_BRANCH |
1418                                         PERF_IP_FLAG_RETURN;
1419
1420                 /*
1421                  * Indirect branch instruction without link (e.g. BR), usually
1422                  * this is used for function return, especially for functions
1423                  * within dynamic link lib.
1424                  */
1425                 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1426                     packet->last_instr_subtype == OCSD_S_INSTR_NONE)
1427                         packet->flags = PERF_IP_FLAG_BRANCH |
1428                                         PERF_IP_FLAG_RETURN;
1429
1430                 /* Return instruction for function return. */
1431                 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1432                     packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
1433                         packet->flags = PERF_IP_FLAG_BRANCH |
1434                                         PERF_IP_FLAG_RETURN;
1435
1436                 /*
1437                  * Decoder might insert a discontinuity in the middle of
1438                  * instruction packets, fixup prev_packet with flag
1439                  * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting trace.
1440                  */
1441                 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1442                         prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1443                                               PERF_IP_FLAG_TRACE_BEGIN;
1444
1445                 /*
1446                  * If the previous packet is an exception return packet
1447                  * and the return address just follows SVC instuction,
1448                  * it needs to calibrate the previous packet sample flags
1449                  * as PERF_IP_FLAG_SYSCALLRET.
1450                  */
1451                 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
1452                                            PERF_IP_FLAG_RETURN |
1453                                            PERF_IP_FLAG_INTERRUPT) &&
1454                     cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
1455                         prev_packet->flags = PERF_IP_FLAG_BRANCH |
1456                                              PERF_IP_FLAG_RETURN |
1457                                              PERF_IP_FLAG_SYSCALLRET;
1458                 break;
1459         case CS_ETM_DISCONTINUITY:
1460                 /*
1461                  * The trace is discontinuous, if the previous packet is
1462                  * instruction packet, set flag PERF_IP_FLAG_TRACE_END
1463                  * for previous packet.
1464                  */
1465                 if (prev_packet->sample_type == CS_ETM_RANGE)
1466                         prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1467                                               PERF_IP_FLAG_TRACE_END;
1468                 break;
1469         case CS_ETM_EXCEPTION:
1470                 ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
1471                 if (ret)
1472                         return ret;
1473
1474                 /* The exception is for system call. */
1475                 if (cs_etm__is_syscall(etmq, magic))
1476                         packet->flags = PERF_IP_FLAG_BRANCH |
1477                                         PERF_IP_FLAG_CALL |
1478                                         PERF_IP_FLAG_SYSCALLRET;
1479                 /*
1480                  * Asynchronous exceptions are triggered by external signals:
1481                  * bus, interrupt controller, debug module, PE reset or halt.
1482                  */
1483                 else if (cs_etm__is_async_exception(etmq, magic))
1484                         packet->flags = PERF_IP_FLAG_BRANCH |
1485                                         PERF_IP_FLAG_CALL |
1486                                         PERF_IP_FLAG_ASYNC |
1487                                         PERF_IP_FLAG_INTERRUPT;
1488                 /*
1489                  * Otherwise, the exception is caused by a trap, an
1490                  * instruction or data fault, or an alignment error.
1491                  */
1492                 else if (cs_etm__is_sync_exception(etmq, magic))
1493                         packet->flags = PERF_IP_FLAG_BRANCH |
1494                                         PERF_IP_FLAG_CALL |
1495                                         PERF_IP_FLAG_INTERRUPT;
1496
1497                 /*
1498                  * An exception packet is not used standalone for generating
1499                  * samples; it is affiliated with the previous instruction
1500                  * range packet.  So when an exception packet is inserted,
1501                  * set the previous range packet's flags to tell perf it is
1502                  * an exception taken branch.
1503                  */
1504                 if (prev_packet->sample_type == CS_ETM_RANGE)
1505                         prev_packet->flags = packet->flags;
1506                 break;
1507         case CS_ETM_EXCEPTION_RET:
1508                 /*
1509                  * An exception return packet is not used standalone for
1510                  * generating samples; it is affiliated with the previous
1511                  * instruction range packet.  So when an exception return
1512                  * packet is inserted, set the previous range packet's flags
1513                  * to tell perf it is an exception return branch.
1514                  *
1515                  * The exception return can be for either a system call or
1516                  * another exception type; unfortunately the packet doesn't
1517                  * carry exception type information, so we cannot decide the
1518                  * exception type purely from the exception return packet.
1519                  * Recording the exception number from the exception packet
1520                  * and reusing it for the exception return packet is not
1521                  * reliable either: the trace can be discontinuous, or the
1522                  * interrupt can be nested, and in both cases the recorded
1523                  * exception number no longer matches the return packet.
1524                  *
1525                  * For an exception return packet we only need to know
1526                  * whether it is for a system call or for another type.  That
1527                  * decision can be deferred until we receive the next packet,
1528                  * which contains the return address; based on that address
1529                  * we can read out the previous instruction, check whether it
1530                  * is a system call instruction, and then calibrate the
1531                  * sample flags as needed.
1532                  */
1533                 if (prev_packet->sample_type == CS_ETM_RANGE)
1534                         prev_packet->flags = PERF_IP_FLAG_BRANCH |
1535                                              PERF_IP_FLAG_RETURN |
1536                                              PERF_IP_FLAG_INTERRUPT;
1537                 break;
1538         case CS_ETM_EMPTY:
1539         default:
1540                 break;
1541         }
1542
1543         return 0;
1544 }
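
/*
 * Editor's summary of the mapping above, derived from the checks in
 * cs_etm__set_sample_flags() rather than from OpenCSD documentation
 * (PERF_IP_FLAG_ prefixes dropped for brevity):
 *
 *	BR_INDIRECT + OCSD_S_INSTR_BR_LINK        -> BRANCH | CALL
 *	BR_INDIRECT + OCSD_S_INSTR_V7_IMPLIED_RET -> BRANCH | RETURN
 *	BR_INDIRECT + OCSD_S_INSTR_NONE           -> BRANCH | RETURN
 *	BR_INDIRECT + OCSD_S_INSTR_V8_RET         -> BRANCH | RETURN
 *
 * with a later fixup that promotes the previous packet's
 * RETURN | INTERRUPT into RETURN | SYSCALLRET when the return address
 * immediately follows an SVC instruction.
 */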
1545
1546 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
1547 {
1548         int ret = 0;
1549         size_t processed = 0;
1550
1551         /*
1552          * Packets are decoded and added to the decoder's packet queue
1553          * until the decoder packet processing callback has requested that
1554          * processing stops or there is nothing left in the buffer.  Normal
1555          * operations that stop processing are a timestamp packet or a full
1556          * decoder buffer queue.
1557          */
1558         ret = cs_etm_decoder__process_data_block(etmq->decoder,
1559                                                  etmq->offset,
1560                                                  &etmq->buf[etmq->buf_used],
1561                                                  etmq->buf_len,
1562                                                  &processed);
1563         if (ret)
1564                 goto out;
1565
1566         etmq->offset += processed;
1567         etmq->buf_used += processed;
1568         etmq->buf_len -= processed;
1569
1570 out:
1571         return ret;
1572 }
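
/*
 * A worked example of the bookkeeping above (editor's sketch; the
 * byte counts are illustrative only): if a 4096 byte block is handed
 * to the decoder and the packet callback stops processing after 1024
 * bytes, e.g. on a timestamp packet, then on return
 *
 *	etmq->offset   += 1024;	// absolute position in the AUX trace
 *	etmq->buf_used += 1024;	// read cursor within this block
 *	etmq->buf_len  -= 1024;	// 3072 bytes left for the next call
 *
 * which is why the caller can simply loop until buf_len reaches zero.
 */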
1573
1574 static int cs_etm__process_decoder_queue(struct cs_etm_queue *etmq)
1575 {
1576         int ret;
1577         struct cs_etm_packet_queue *packet_queue;
1578
1579         packet_queue = cs_etm__etmq_get_packet_queue(etmq);
1580
1581         /* Process each packet in this chunk */
1582         while (1) {
1583                 ret = cs_etm_decoder__get_packet(packet_queue,
1584                                                  etmq->packet);
1585                 if (ret <= 0)
1586                         /*
1587                          * Stop processing this chunk on
1588                          * end of data or error
1589                          */
1590                         break;
1591
1592                 /*
1593                  * Packet addresses are swapped in the packet handling
1594                  * within the switch() statement below, so the sample
1595                  * flags must be set before the switch() statement in
1596                  * order to use the address information from before the
1597                  * packets are swapped.
1598                  */
1599                 ret = cs_etm__set_sample_flags(etmq);
1600                 if (ret < 0)
1601                         break;
1602
1603                 switch (etmq->packet->sample_type) {
1604                 case CS_ETM_RANGE:
1605                         /*
1606                          * If the packet contains an instruction
1607                          * range, generate instruction sequence
1608                          * events.
1609                          */
1610                         cs_etm__sample(etmq);
1611                         break;
1612                 case CS_ETM_EXCEPTION:
1613                 case CS_ETM_EXCEPTION_RET:
1614                         /*
1615                          * If an exception packet comes, make
1616                          * sure the previous instruction range
1617                          * packet is handled properly.
1618                          */
1619                         cs_etm__exception(etmq);
1620                         break;
1621                 case CS_ETM_DISCONTINUITY:
1622                         /*
1623                          * Discontinuity in trace, flush
1624                          * previous branch stack
1625                          */
1626                         cs_etm__flush(etmq);
1627                         break;
1628                 case CS_ETM_EMPTY:
1629                         /*
1630                          * Should not receive empty packet,
1631                          * report error.
1632                          */
1633                         pr_err("CS ETM Trace: empty packet\n");
1634                         return -EINVAL;
1635                 default:
1636                         break;
1637                 }
1638         }
1639
1640         return ret;
1641 }
1642
1643 static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
1644 {
1645         int err = 0;
1646
1647         /* Go through each buffer in the queue and decode them one by one */
1648         while (1) {
1649                 err = cs_etm__get_data_block(etmq);
1650                 if (err <= 0)
1651                         return err;
1652
1653                 /* Run trace decoder until buffer consumed or end of trace */
1654                 do {
1655                         err = cs_etm__decode_data_block(etmq);
1656                         if (err)
1657                                 return err;
1658
1659                         /*
1660                          * Process each packet in this chunk; if an error
1661                          * occurs there is nothing to do other than hope
1662                          * the next chunk will be better.
1663                          */
1664                         err = cs_etm__process_decoder_queue(etmq);
1665
1666                 } while (etmq->buf_len);
1667
1668                 if (err == 0)
1669                         /* Flush any remaining branch stack entries */
1670                         err = cs_etm__end_block(etmq);
1671         }
1672
1673         return err;
1674 }
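
/*
 * Editor's summary of the decode pipeline implemented above:
 *
 *	cs_etm__run_decoder()
 *	  -> cs_etm__get_data_block()          // fetch one AUX buffer
 *	    -> cs_etm__decode_data_block()     // repeat until buf_len == 0
 *	      -> cs_etm__process_decoder_queue()  // drain the packet queue
 *
 * with cs_etm__end_block() flushing whatever branch stack entries
 * remain once a buffer has been fully consumed.
 */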
1675
1676 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
1677                                            pid_t tid)
1678 {
1679         unsigned int i;
1680         struct auxtrace_queues *queues = &etm->queues;
1681
1682         for (i = 0; i < queues->nr_queues; i++) {
1683                 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
1684                 struct cs_etm_queue *etmq = queue->priv;
1685
1686                 if (etmq && ((tid == -1) || (etmq->tid == tid))) {
1687                         cs_etm__set_pid_tid_cpu(etm, queue);
1688                         cs_etm__run_decoder(etmq);
1689                 }
1690         }
1691
1692         return 0;
1693 }
1694
1695 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
1696                                         union perf_event *event)
1697 {
1698         struct thread *th;
1699
1700         if (etm->timeless_decoding)
1701                 return 0;
1702
1703         /*
1704          * Add the tid/pid to the log so that we can get a match when
1705          * we get a contextID from the decoder.
1706          */
1707         th = machine__findnew_thread(etm->machine,
1708                                      event->itrace_start.pid,
1709                                      event->itrace_start.tid);
1710         if (!th)
1711                 return -ENOMEM;
1712
1713         thread__put(th);
1714
1715         return 0;
1716 }
1717
1718 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
1719                                            union perf_event *event)
1720 {
1721         struct thread *th;
1722         bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1723
1724         /*
1725          * Context switches in per-thread mode are irrelevant since perf
1726          * will start/stop tracing as the process is scheduled.
1727          */
1728         if (etm->timeless_decoding)
1729                 return 0;
1730
1731         /*
1732          * SWITCH_IN events carry the previous process (the one switched
1733          * out) while SWITCH_OUT events carry the next process (the one
1734          * switched in).  As such we don't care about IN events.
1735          */
1736         if (!out)
1737                 return 0;
1738
1739         /*
1740          * Add the tid/pid to the log so that we can get a match when
1741          * we get a contextID from the decoder.
1742          */
1743         th = machine__findnew_thread(etm->machine,
1744                                      event->context_switch.next_prev_pid,
1745                                      event->context_switch.next_prev_tid);
1746         if (!th)
1747                 return -ENOMEM;
1748
1749         thread__put(th);
1750
1751         return 0;
1752 }
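
/*
 * Editor's note on the event layout assumed above: the kernel sets
 * PERF_RECORD_MISC_SWITCH_OUT in header.misc for the outgoing side of
 * a context switch, in which case next_prev_pid/next_prev_tid name
 * the incoming task.  A minimal consumer therefore looks like:
 *
 *	if (event->header.misc & PERF_RECORD_MISC_SWITCH_OUT)
 *		track(event->context_switch.next_prev_pid,
 *		      event->context_switch.next_prev_tid);
 *
 * where track() is a hypothetical stand-in for the
 * machine__findnew_thread()/thread__put() pair used above.
 */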
1753
1754 static int cs_etm__process_event(struct perf_session *session,
1755                                  union perf_event *event,
1756                                  struct perf_sample *sample,
1757                                  struct perf_tool *tool)
1758 {
1759         int err = 0;
1760         u64 timestamp;
1761         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1762                                                    struct cs_etm_auxtrace,
1763                                                    auxtrace);
1764
1765         if (dump_trace)
1766                 return 0;
1767
1768         if (!tool->ordered_events) {
1769                 pr_err("CoreSight ETM Trace requires ordered events\n");
1770                 return -EINVAL;
1771         }
1772
1773         if (!etm->timeless_decoding)
1774                 return -EINVAL;
1775
1776         if (sample->time && (sample->time != (u64) -1))
1777                 timestamp = sample->time;
1778         else
1779                 timestamp = 0;
1780
1781         if (timestamp || etm->timeless_decoding) {
1782                 err = cs_etm__update_queues(etm);
1783                 if (err)
1784                         return err;
1785         }
1786
1787         if (event->header.type == PERF_RECORD_EXIT)
1788                 return cs_etm__process_timeless_queues(etm,
1789                                                        event->fork.tid);
1790
1791         if (event->header.type == PERF_RECORD_ITRACE_START)
1792                 return cs_etm__process_itrace_start(etm, event);
1793         else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
1794                 return cs_etm__process_switch_cpu_wide(etm, event);
1795
1796         return 0;
1797 }
1798
1799 static int cs_etm__process_auxtrace_event(struct perf_session *session,
1800                                           union perf_event *event,
1801                                           struct perf_tool *tool __maybe_unused)
1802 {
1803         struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1804                                                    struct cs_etm_auxtrace,
1805                                                    auxtrace);
1806         if (!etm->data_queued) {
1807                 struct auxtrace_buffer *buffer;
1808                 off_t  data_offset;
1809                 int fd = perf_data__fd(session->data);
1810                 bool is_pipe = perf_data__is_pipe(session->data);
1811                 int err;
1812
1813                 if (is_pipe)
1814                         data_offset = 0;
1815                 else {
1816                         data_offset = lseek(fd, 0, SEEK_CUR);
1817                         if (data_offset == -1)
1818                                 return -errno;
1819                 }
1820
1821                 err = auxtrace_queues__add_event(&etm->queues, session,
1822                                                  event, data_offset, &buffer);
1823                 if (err)
1824                         return err;
1825
1826                 if (dump_trace)
1827                         if (auxtrace_buffer__get_data(buffer, fd)) {
1828                                 cs_etm__dump_event(etm, buffer);
1829                                 auxtrace_buffer__put_data(buffer);
1830                         }
1831         }
1832
1833         return 0;
1834 }
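
/*
 * Editor's note: when reading from a pipe there is no file position
 * to record, so data_offset is simply 0; for a file,
 * lseek(fd, 0, SEEK_CUR) yields the current position, i.e. where this
 * event's trace data begins.  Either way only the (offset, size) pair
 * is queued here; the data itself is fetched lazily through
 * auxtrace_buffer__get_data(), as the dump_trace path shows.
 */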
1835
1836 static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
1837 {
1838         struct perf_evsel *evsel;
1839         struct perf_evlist *evlist = etm->session->evlist;
1840         bool timeless_decoding = true;
1841
1842         /*
1843          * Cycle through the list of events and note if we find one
1844          * with the time bit set.
1845          */
1846         evlist__for_each_entry(evlist, evsel) {
1847                 if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
1848                         timeless_decoding = false;
1849         }
1850
1851         return timeless_decoding;
1852 }
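
/*
 * Editor's note: whether PERF_SAMPLE_TIME is set depends on how the
 * session was recorded, e.g. something along the lines of
 *
 *	perf record -e cs_etm//u --per-thread -- ls	// timeless
 *	perf record -e cs_etm//u -T -- ls		// timestamped
 *
 * (command lines are illustrative).  In the timeless case the queues
 * are decoded one after another instead of being merged on time; see
 * cs_etm__process_timeless_queues().
 */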
1853
1854 static const char * const cs_etm_global_header_fmts[] = {
1855         [CS_HEADER_VERSION_0]   = "     Header version                 %llx\n",
1856         [CS_PMU_TYPE_CPUS]      = "     PMU type/num cpus              %llx\n",
1857         [CS_ETM_SNAPSHOT]       = "     Snapshot                       %llx\n",
1858 };
1859
1860 static const char * const cs_etm_priv_fmts[] = {
1861         [CS_ETM_MAGIC]          = "     Magic number                   %llx\n",
1862         [CS_ETM_CPU]            = "     CPU                            %lld\n",
1863         [CS_ETM_ETMCR]          = "     ETMCR                          %llx\n",
1864         [CS_ETM_ETMTRACEIDR]    = "     ETMTRACEIDR                    %llx\n",
1865         [CS_ETM_ETMCCER]        = "     ETMCCER                        %llx\n",
1866         [CS_ETM_ETMIDR]         = "     ETMIDR                         %llx\n",
1867 };
1868
1869 static const char * const cs_etmv4_priv_fmts[] = {
1870         [CS_ETM_MAGIC]          = "     Magic number                   %llx\n",
1871         [CS_ETM_CPU]            = "     CPU                            %lld\n",
1872         [CS_ETMV4_TRCCONFIGR]   = "     TRCCONFIGR                     %llx\n",
1873         [CS_ETMV4_TRCTRACEIDR]  = "     TRCTRACEIDR                    %llx\n",
1874         [CS_ETMV4_TRCIDR0]      = "     TRCIDR0                        %llx\n",
1875         [CS_ETMV4_TRCIDR1]      = "     TRCIDR1                        %llx\n",
1876         [CS_ETMV4_TRCIDR2]      = "     TRCIDR2                        %llx\n",
1877         [CS_ETMV4_TRCIDR8]      = "     TRCIDR8                        %llx\n",
1878         [CS_ETMV4_TRCAUTHSTATUS] = "    TRCAUTHSTATUS                  %llx\n",
1879 };
1880
1881 static void cs_etm__print_auxtrace_info(u64 *val, int num)
1882 {
1883         int i, j, cpu = 0;
1884
1885         for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
1886                 fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
1887
1888         for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
1889                 if (val[i] == __perf_cs_etmv3_magic)
1890                         for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
1891                                 fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
1892                 else if (val[i] == __perf_cs_etmv4_magic)
1893                         for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
1894                                 fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
1895                 else
1896                         /* Unknown magic number, bail out */
1897                         return;
1898         }
1899 }
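
/*
 * Layout of the auxtrace_info private area consumed below, one u64
 * per cell (editor's sketch; see cs-etm.h for the authoritative
 * definitions):
 *
 *	[ header version (0) ]
 *	[ PMU type (63:32) | number of CPUs (31:0) ]
 *	[ snapshot mode ]
 *	then, per CPU, either an ETMv3/PTM block
 *	[ magic ][ CPU ][ ETMCR ][ ETMTRACEIDR ][ ETMCCER ][ ETMIDR ]
 *	or an ETMv4 block
 *	[ magic ][ CPU ][ TRCCONFIGR ][ TRCTRACEIDR ][ TRCIDR0 ]
 *	[ TRCIDR1 ][ TRCIDR2 ][ TRCIDR8 ][ TRCAUTHSTATUS ]
 */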
1900
1901 int cs_etm__process_auxtrace_info(union perf_event *event,
1902                                   struct perf_session *session)
1903 {
1904         struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
1905         struct cs_etm_auxtrace *etm = NULL;
1906         struct int_node *inode;
1907         unsigned int pmu_type;
1908         int event_header_size = sizeof(struct perf_event_header);
1909         int info_header_size;
1910         int total_size = auxtrace_info->header.size;
1911         int priv_size = 0;
1912         int num_cpu;
1913         int err = 0, idx = -1;
1914         int i, j, k;
1915         u64 *ptr, *hdr = NULL;
1916         u64 **metadata = NULL;
1917
1918         /*
1919          * sizeof(auxtrace_info_event::type) +
1920          * sizeof(auxtrace_info_event::reserved) == 8
1921          */
1922         info_header_size = 8;
1923
1924         if (total_size < (event_header_size + info_header_size))
1925                 return -EINVAL;
1926
1927         priv_size = total_size - event_header_size - info_header_size;
1928
1929         /* First the global part */
1930         ptr = (u64 *) auxtrace_info->priv;
1931
1932         /* Look for version '0' of the header */
1933         if (ptr[0] != 0)
1934                 return -EINVAL;
1935
1936         hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
1937         if (!hdr)
1938                 return -ENOMEM;
1939
1940         /* Extract header information - see cs-etm.h for format */
1941         for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
1942                 hdr[i] = ptr[i];
1943         num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
1944         pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
1945                                     0xffffffff);
1946
1947          * Create an RB tree for traceID-metadata tuples.  Since the lookup
1948          * has to be made for each packet that gets decoded, optimizing access
1949          * with something faster than a sequential array search is worth doing.
1950          * in anything other than a sequential array is worth doing.
1951          */
1952         traceid_list = intlist__new(NULL);
1953         if (!traceid_list) {
1954                 err = -ENOMEM;
1955                 goto err_free_hdr;
1956         }
1957
1958         metadata = zalloc(sizeof(*metadata) * num_cpu);
1959         if (!metadata) {
1960                 err = -ENOMEM;
1961                 goto err_free_traceid_list;
1962         }
1963
1964         /*
1965          * The metadata is stored in the auxtrace_info section and encodes
1966          * the configuration of the ARM Embedded Trace Macrocell.  The trace
1967          * decoder requires it to properly decode the trace, owing to the
1968          * trace's highly compressed nature.
1969          */
1970         for (j = 0; j < num_cpu; j++) {
1971                 if (ptr[i] == __perf_cs_etmv3_magic) {
1972                         metadata[j] = zalloc(sizeof(*metadata[j]) *
1973                                              CS_ETM_PRIV_MAX);
1974                         if (!metadata[j]) {
1975                                 err = -ENOMEM;
1976                                 goto err_free_metadata;
1977                         }
1978                         for (k = 0; k < CS_ETM_PRIV_MAX; k++)
1979                                 metadata[j][k] = ptr[i + k];
1980
1981                         /* The traceID is our handle */
1982                         idx = metadata[j][CS_ETM_ETMTRACEIDR];
1983                         i += CS_ETM_PRIV_MAX;
1984                 } else if (ptr[i] == __perf_cs_etmv4_magic) {
1985                         metadata[j] = zalloc(sizeof(*metadata[j]) *
1986                                              CS_ETMV4_PRIV_MAX);
1987                         if (!metadata[j]) {
1988                                 err = -ENOMEM;
1989                                 goto err_free_metadata;
1990                         }
1991                         for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
1992                                 metadata[j][k] = ptr[i + k];
1993
1994                         /* The traceID is our handle */
1995                         idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
1996                         i += CS_ETMV4_PRIV_MAX;
1997                 }
1998
1999                 /* Get an RB node for this CPU */
2000                 inode = intlist__findnew(traceid_list, idx);
2001
2002                 /* Something went wrong, no need to continue */
2003                 if (!inode) {
2004                         err = -ENOMEM;
2005                         goto err_free_metadata;
2006                 }
2007
2008                 /*
2009                  * The node for that CPU should not have been taken
2010                  * already.  Back out if that's the case.
2011                  */
2012                 if (inode->priv) {
2013                         err = -EINVAL;
2014                         goto err_free_metadata;
2015                 }
2016                 /* All good, associate the traceID with the metadata pointer */
2017                 inode->priv = metadata[j];
2018         }
2019
2020         /*
2021          * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
2022          * CS_ETMV4_PRIV_MAX mark how many double words are in the
2023          * global metadata, and each cpu's metadata respectively.
2024          * The following tests if the correct number of double words was
2025          * present in the auxtrace info section.
2026          */
2027         if (i * 8 != priv_size) {
2028                 err = -EINVAL;
2029                 goto err_free_metadata;
2030         }
2031
2032         etm = zalloc(sizeof(*etm));
2033
2034         if (!etm) {
2035                 err = -ENOMEM;
2036                 goto err_free_metadata;
2037         }
2038
2039         err = auxtrace_queues__init(&etm->queues);
2040         if (err)
2041                 goto err_free_etm;
2042
2043         etm->session = session;
2044         etm->machine = &session->machines.host;
2045
2046         etm->num_cpu = num_cpu;
2047         etm->pmu_type = pmu_type;
2048         etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
2049         etm->metadata = metadata;
2050         etm->auxtrace_type = auxtrace_info->type;
2051         etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
2052
2053         etm->auxtrace.process_event = cs_etm__process_event;
2054         etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
2055         etm->auxtrace.flush_events = cs_etm__flush_events;
2056         etm->auxtrace.free_events = cs_etm__free_events;
2057         etm->auxtrace.free = cs_etm__free;
2058         session->auxtrace = &etm->auxtrace;
2059
2060         etm->unknown_thread = thread__new(999999999, 999999999);
2061         if (!etm->unknown_thread)
2062                 goto err_free_queues;
2063
2064         /*
2065          * Initialize list node so that at thread__zput() we can avoid
2066          * segmentation fault at list_del_init().
2067          */
2068         INIT_LIST_HEAD(&etm->unknown_thread->node);
2069
2070         err = thread__set_comm(etm->unknown_thread, "unknown", 0);
2071         if (err)
2072                 goto err_delete_thread;
2073
2074         if (thread__init_map_groups(etm->unknown_thread, etm->machine))
2075                 goto err_delete_thread;
2076
2077         if (dump_trace) {
2078                 cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
2079                 return 0;
2080         }
2081
2082         if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
2083                 etm->synth_opts = *session->itrace_synth_opts;
2084         } else {
2085                 itrace_synth_opts__set_default(&etm->synth_opts,
2086                         session->itrace_synth_opts && session->itrace_synth_opts->default_no_sample);
2087                 etm->synth_opts.callchain = false;
2088         }
2089
2090         err = cs_etm__synth_events(etm, session);
2091         if (err)
2092                 goto err_delete_thread;
2093
2094         err = auxtrace_queues__process_index(&etm->queues, session);
2095         if (err)
2096                 goto err_delete_thread;
2097
2098         etm->data_queued = etm->queues.populated;
2099
2100         return 0;
2101
2102 err_delete_thread:
2103         thread__zput(etm->unknown_thread);
2104 err_free_queues:
2105         auxtrace_queues__free(&etm->queues);
2106         session->auxtrace = NULL;
2107 err_free_etm:
2108         zfree(&etm);
2109 err_free_metadata:
2110         /* No need to check @metadata[j], free(NULL) is supported */
2111         for (j = 0; j < num_cpu; j++)
2112                 free(metadata[j]);
2113         zfree(&metadata);
2114 err_free_traceid_list:
2115         intlist__delete(traceid_list);
2116 err_free_hdr:
2117         zfree(&hdr);
2118
2119         return -EINVAL;
2120 }