// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include "pmu.h"
#include "strbuf.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include <bpf/libbpf.h>
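
/*
 * Insert info about a loaded BPF program into the env's rb-tree,
 * keyed by program id; duplicates are dropped with a debug message.
 */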
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}
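
/*
 * Look up BPF program info by program id; returns NULL if the id is
 * not in the tree. Takes the bpf_progs lock for reading.
 */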
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}
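
/* Insert a BTF node into the env's rb-tree, keyed by BTF id. */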
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}
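
/* Look up a BTF node by id; returns NULL when the id is unknown. */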
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;
out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT
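
/*
 * Release everything the env owns: the BPF/BTF trees, the cgroup
 * cache, and all dynamically allocated strings and topology arrays.
 */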
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
		zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
		zfree(&env->hybrid_cpc_nodes[i].pmu_name);
	}
	zfree(&env->hybrid_cpc_nodes);
}
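
/* Prepare an env for use: empty BPF trees, kernel mode not yet probed. */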
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}
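
/*
 * Probe the kernel's word size from the raw machine string; the result
 * is cached in env->kernel_is_64_bit (-1 means not yet probed).
 */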
static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}
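
/* Record a copy of the perf command line's argv array in the env. */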
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy the argv pointers because the array gets moved around
	 * during option parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}
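
/*
 * Fill env->cpu with the core/socket/die id of every present CPU on
 * the local machine.
 */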
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}
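
/*
 * Count the PMUs and record their mappings as one "type:name" string
 * per PMU, each NUL-terminated, packed back to back.
 */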
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}
	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NUL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
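
/* Read the local CPU id string into env->cpuid, replacing any old value. */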
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return -ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env || !env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}
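
/*
 * Map a CPU number to its NUMA node. The cpu -> node array is built
 * lazily on first use; CPUs that belong to no node map to -1.
 */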
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing CPUs, which return node -1.
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}