// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
8 #include <linux/bits.h>
9 #include <linux/bitops.h>
10 #include <linux/compiler.h>
11 #include <linux/coresight-pmu.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/zalloc.h>
19 #include "../../util/debug.h"
20 #include "../../util/record.h"
21 #include "../../util/auxtrace.h"
22 #include "../../util/cpumap.h"
23 #include "../../util/event.h"
24 #include "../../util/evlist.h"
25 #include "../../util/evsel.h"
26 #include "../../util/perf_api_probe.h"
27 #include "../../util/evsel_config.h"
28 #include "../../util/pmu.h"
29 #include "../../util/cs-etm.h"
30 #include <internal/lib.h> // page_size
31 #include "../../util/session.h"
/*
 * Per-session state for the CoreSight ETM AUX-trace recorder.  The generic
 * auxtrace_record is embedded first so the callbacks receiving an
 * auxtrace_record pointer can recover this structure with container_of().
 * NOTE(review): additional fields (snapshot_mode/snapshot_size/wrapped/
 * wrapped_cnt, used elsewhere in this file) are not visible in this view.
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;		/* generic auxtrace callback table (must stay first-class for container_of) */
	struct perf_pmu		*cs_etm_pmu;	/* the kernel-registered cs_etm PMU */
	struct evlist		*evlist;	/* event list of the current record session */
/*
 * sysfs file names (relative to the per-CPU cs_etm PMU directory) of the
 * read-only ETMv3 metadata registers, indexed by their slot in the
 * CS_ETM_* perf.data private header layout.
 */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER] = "mgmt/etmccer",
	[CS_ETM_ETMIDR] = "mgmt/etmidr",
/*
 * sysfs file names of the read-only ETMv4 metadata registers, indexed by
 * their slot in the CS_ETMV4_* perf.data private header layout.
 */
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",

/* Forward declaration - used before its definition further down the file */
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
/*
 * Probe TRCIDR2.CIDSIZE on @cpu to see whether the ETMv4 tracer supports
 * contextID tracing and, if it does, request it by setting ETM_OPT_CTXTID
 * in the event's attr.config.  Only meaningful for ETMv4 tracers.
 */
static int cs_etm_set_context_id(struct auxtrace_record *itr,
				 struct evsel *evsel, int cpu)
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	/* contextID support is only advertised/queried on ETMv4 hardware */
	if (!cs_etm_is_etmv4(itr, cpu))

	/* Get a handle on TRCIRD2 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
	/* There was a problem reading the file, bailing out */
	pr_err("%s: can't read file %s\n",
	       CORESIGHT_ETM_PMU_NAME, path);

	/*
	 * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
	 * is supported:
	 *  0b00000 Context ID tracing is not supported.
	 *  0b00100 Maximum of 32-bit Context ID size.
	 *  All other values are reserved.
	 */
	val = BMVAL(val, 5, 9);
	/* NOTE(review): "!val" is redundant here - "val != 0x4" covers it */
	if (!val || val != 0x4) {

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
/*
 * Probe TRCIDR0.TSSIZE on @cpu to see whether the ETMv4 tracer implements
 * global timestamping and, if it does, request it by setting ETM_OPT_TS
 * in the event's attr.config.  Only meaningful for ETMv4 tracers.
 */
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct evsel *evsel, int cpu)
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	/* timestamp support is only advertised/queried on ETMv4 hardware */
	if (!cs_etm_is_etmv4(itr, cpu))

	/* Get a handle on TRCIRD0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
	/* There was a problem reading the file, bailing out */
	pr_err("%s: can't read file %s\n",
	       CORESIGHT_ETM_PMU_NAME, path);

	/*
	 * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping
	 * is implemented:
	 *  0b00000 Global timestamping is not implemented
	 *  0b00110 Implementation supports a maximum timestamp of 48bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64bits.
	 */
	val &= GENMASK(28, 24);

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_TS);
/* Option flags consumed by cs_etm_set_option() below */
#define ETM_SET_OPT_CTXTID	(1 << 0)
#define ETM_SET_OPT_TS		(1 << 1)
#define ETM_SET_OPT_MASK	(ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS)

/*
 * Apply the requested option(s) - contextID and/or timestamp tracing -
 * to @evsel for every CPU that is both targeted by the event's cpu map
 * and currently online.
 */
static int cs_etm_set_option(struct auxtrace_record *itr,
			     struct evsel *evsel, u32 option)
	int i, err = -EINVAL;
	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* Set option of each CPU we have */
	for (i = 0; i < cpu__max_cpu(); i++) {
		/* skip CPUs the event doesn't target or that are offline */
		if (!cpu_map__has(event_cpus, i) ||
		    !cpu_map__has(online_cpus, i))

		if (option & ETM_SET_OPT_CTXTID) {
			err = cs_etm_set_context_id(itr, evsel, i);
		if (option & ETM_SET_OPT_TS) {
			err = cs_etm_set_timestamp(itr, evsel, i);
		/* reject flags outside of the supported mask */
		if (option & ~(ETM_SET_OPT_MASK))
			/* Nothing else is currently supported */

	/* drop the reference taken by perf_cpu_map__new() */
	perf_cpu_map__put(online_cpus);
/*
 * Parse the optional size argument given to the snapshot (-S) record
 * option.  On success the size is stored both in the record options and
 * in the recorder's private state.
 */
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;

	snapshot_size = strtoull(str, &endptr, 0);
	/* reject trailing garbage and values that don't fit in a size_t */
	if (*endptr || snapshot_size > SIZE_MAX)

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;
/*
 * Resolve the sink name given on the command line (as a DRV_CFG config
 * term, e.g. --config sink=...) into the hash the CoreSight driver
 * expects, by reading sinks/<name> from the PMU's sysfs directory, and
 * store that hash in attr.config2.
 */
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct evsel_config_term *term;

	/* a sink hash already present in the low 32 bits of config2 wins */
	if (evsel->core.attr.config2 & GENMASK(31, 0))

	list_for_each_entry(term, &evsel->config_terms, list) {
		/* only DRV_CFG terms carry a sink specification */
		if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)

		sink = term->val.str;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		/* ask sysfs for the hash of this sink name */
		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
		       sink, evsel__name(evsel), errno,
		       str_error_r(errno, msg, sizeof(msg)));

		evsel->core.attr.config2 |= hash;

	/*
	 * No sink was provided on the command line - allow the CoreSight
	 * system to look for a default
	 */
/*
 * Validate and massage record options for a CoreSight ETM session:
 * - enforce a single cs_etm event and pin its sampling config;
 * - resolve the trace sink;
 * - size the AUX area for snapshot or full-trace mode;
 * - enable per-CPU options (contextID/timestamp) where applicable;
 * - append a dummy tracking event for side-band records.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = perf_event_paranoid_check(-1);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* record context-switch events when the user hasn't opted out and the kernel can */
	if (!record_opts__no_switch_events(opts) &&
	    perf_can_record_switch_events())
		opts->record_switch_events = true;

	/* locate the cs_etm event; only one is allowed per session */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			pr_err("There may be only one %s event\n",
			       CORESIGHT_ETM_PMU_NAME);
			/* period of 1, no frequency mode - every record counts */
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;

	/* no need to continue if at least one event of interest was found */

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);

	/* the ETM hardware cannot honour a user-selected clockid */
	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size were given to '-S' or '-m,', so go with
		 * defaults (larger when privileged - see branches below).
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
			opts->auxtrace_mmap_pages =
						KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size (rounded up to a power-of-two page count).
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);

		/* Snapshost size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		opts->auxtrace_mmap_pages = MiB(4) / page_size;
		opts->auxtrace_mmap_pages = KiB(128) / page_size;
		if (opts->mmap_pages == UINT_MAX)
			opts->mmap_pages = KiB(256) / page_size;

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		/* unprivileged users are clamped to max_page (see truncation below) */
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",

		/* the AUX ring buffer must be a power-of-two number of bytes */
		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event. We also need the contextID in order to be notified
	 * when a context switch happened.
	 */
	if (!perf_cpu_map__empty(cpus)) {
		evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS);

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);

		/* the dummy event was appended last by parse_events() */
		tracking_evsel = evlist__last(evlist);
		evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(tracking_evsel, TIME);
/*
 * Return the perf_event_attr::config of the session's cs_etm event.
 * The bit layout of that value follows the ETMv3/PTM register format
 * (see the comment in the loop body).
 */
static u64 cs_etm_get_config(struct auxtrace_record *itr)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;

	/* find the (single) event belonging to the cs_etm PMU */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM. The bit fields have been made to match
			 * the ETMv3.5 ETRMCR register specification. See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-perf.c for
			 * details.
			 */
			config = evsel->core.attr.config;
/* NOTE(review): shadows/duplicates the kernel BIT() helper; 1UL is fine here
 * since all ETM4_CFG_BIT_* positions used below are < 32. */
#define BIT(N) (1UL << (N))

/*
 * Build the ETMv4 TRCCONFIGR value from the event's config bits by
 * remapping each command-line option bit to its ETMv4 position.
 */
static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real register bits.
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);
/*
 * Compute the size of the private header area needed in the perf.data
 * auxtrace info record: one fixed header plus a per-CPU block whose size
 * depends on whether that CPU's tracer is ETMv3 or ETMv4.
 */
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		      struct evlist *evlist __maybe_unused)
	int etmv3 = 0, etmv4 = 0;
	struct perf_cpu_map *event_cpus = evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!perf_cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			/* only count CPUs that are both targeted and online */
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))

			if (cs_etm_is_etmv4(itr, i))

	/* get configuration for all CPUs in the system */
	for (i = 0; i < cpu__max_cpu(); i++) {
		if (!cpu_map__has(online_cpus, i))

		if (cs_etm_is_etmv4(itr, i))

	/* drop the reference taken by perf_cpu_map__new() */
	perf_cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
/*
 * Tell whether the tracer affine to @cpu is ETMv4 by probing sysfs for an
 * ETMv4-only read-only register file (TRCIDR0): if the file can be read,
 * the tracer is ETMv4.
 */
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
/*
 * Read a read-only metadata register value for @cpu from the PMU's
 * sysfs directory (@path is relative to the cpu%d/ subdirectory).
 */
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
	char pmu_path[PATH_MAX];
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	pr_err("%s: error reading: %s\n", __func__, pmu_path);
/*
 * Fill the per-CPU slice of the auxtrace info private area for @cpu,
 * starting at info->priv[*offset]: a magic number identifying the tracer
 * generation (ETMv3 or ETMv4), the CPU number, the trace configuration,
 * the traceID, and the read-only registers from sysfs.  On return
 * *offset points past this CPU's slice.
 */
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
		/* ETMv3/PTM path */
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
/*
 * Fill the AUXTRACE_INFO record written into perf.data: a session-wide
 * header (PMU type, CPU count, snapshot flag) followed by one metadata
 * slice per traced CPU (see cs_etm_get_metadata()).
 */
static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct perf_record_auxtrace_info *info,
	struct perf_cpu_map *cpu_map;
	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* the caller-provided area must match what info_priv_size computed */
	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))

	if (!session->evlist->core.nr_mmaps)

	/* If the cpu_map is empty all online CPUs are involved */
	if (perf_cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
		/* Make sure all specified CPUs are online */
		for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i))

		cpu_map = event_cpus;

	nr_cpu = perf_cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	/* pack PMU type (high 32 bits) and CPU count (low 32 bits) together */
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	/* per-CPU metadata slices start right after the fixed header */
	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	/* drop the reference taken by perf_cpu_map__new() */
	perf_cpu_map__put(online_cpus);
/*
 * Grow the per-mmap "has wrapped" bool array so that index @idx is valid;
 * newly added slots are initialised to false.
 */
static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
	int cnt = ptr->wrapped_cnt;

	/* Make @ptr->wrapped as big as @idx */

	/*
	 * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
	 * cross compilation problems where the host's system supports
	 * reallocarray() but not the target.
	 */
	wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));

	/* newest slot starts out "not wrapped" */
	wrapped[cnt - 1] = false;
	ptr->wrapped_cnt = cnt;
	ptr->wrapped = wrapped;
/*
 * Heuristically decide whether the AUX ring buffer has wrapped around:
 * either @head is numerically past the buffer size, or there is non-zero
 * trace data in the last 512 bytes of the (initially zeroed) mapping.
 */
static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
				      size_t buffer_size, u64 head)
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * We want to look the very last 512 byte (chosen arbitrarily) in
	 * the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * @head is continuously increasing - if its value is equal or greater
	 * than the size of the ring buffer, it has wrapped around.
	 */
	if (head >= buffer_size)

	/*
	 * The value of @head is somewhere within the size of the ring buffer.
	 * This can be that there hasn't been enough data to fill the ring
	 * buffer yet or the trace time was so long that @head has numerically
	 * wrapped around. To find we need to check if we have data at the very
	 * end of the ring buffer. We can reliably do this because mmap'ed
	 * pages are zeroed out and there is a fresh mapping with every new
	 * session.
	 */

	/* @head is less than 512 byte from the end of the ring buffer */
	if (head > watermark)

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */

	/*
	 * If we find trace data at the end of the ring buffer, @head has
	 * been there and has numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
/*
 * Work out the portion of the AUX buffer to save for a snapshot: before
 * the first wrap only the [old, head) delta is copied; once the buffer
 * has wrapped, the whole buffer content is taken.
 */
static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);

	/*
	 * Check to see if *head has wrapped around. If it hasn't only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros. But as soon as *head has
	 * wrapped around the entire size of the AUX ring buffer it taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		/* remember the wrap so subsequent snapshots take everything */
		ptr->wrapped[idx] = true;

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */

	/*
	 * *head has wrapped around - adjust *head and *old to pickup the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
		*old = *head - mm->len;
/*
 * Pause tracing at the start of a snapshot capture by disabling the
 * session's cs_etm event.
 */
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
/*
 * Resume tracing after a snapshot capture by re-enabling the session's
 * cs_etm event.
 */
static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
/*
 * Produce a 64-bit pseudo-random reference value for AUX records by
 * combining two 32-bit rand() results (rand() typically yields < 32 bits
 * of range, hence two calls).
 */
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
/*
 * Release the recorder's private state, including the wrapped[] array
 * allocated lazily by cs_etm_alloc_wrapped_array().
 */
static void cs_etm_recording_free(struct auxtrace_record *itr)
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
/*
 * Constructor for the CoreSight ETM auxtrace recorder: look up the
 * cs_etm PMU, allocate the private state and wire up every auxtrace
 * callback.  On failure *err is set (continues past this view).
 */
struct auxtrace_record *cs_etm_record_init(int *err)
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	ptr = zalloc(sizeof(struct cs_etm_recording));

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= auxtrace_record__read_finish;