// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
/* For the CPU_* macros */
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/string.h>
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "debug.h"
#include "util/counts.h"

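/*
 * Open the sys_enter_openat tracepoint on all CPUs, issue a distinct
 * number of openat() calls while pinned to each CPU in turn, then check
 * that the value read back for each CPU matches what was issued there.
 */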
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

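	/* Open the tracepoint event on every CPU in the map for this thread. */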
	if (evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

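	/*
	 * Pin the thread to each CPU in turn and issue a per-cpu distinct
	 * number of openat() calls, so the counts can be verified per CPU.
	 */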
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

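	/* Read back and verify the count on each CPU that was exercised above. */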
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}