// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;
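
/*
 * BPF program info collected at 'perf record' time is kept in an rbtree,
 * keyed by BPF program id and protected by the bpf_progs.lock rwsem, so it
 * can be looked up again when the recorded data is reported or annotated.
 */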
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}
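
/*
 * Look up a previously inserted bpf_prog_info_node by program id. Takes the
 * lock for reading only and returns NULL when the id is not in the tree.
 */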
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}
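
/*
 * BTF objects referenced by the recorded BPF programs live in a second
 * rbtree, keyed by BTF id and protected by the same bpf_progs.lock.
 */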
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}
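
/* Look up a BTF object by id; returns NULL if it was never inserted. */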
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
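
/* Release all dynamically allocated fields of a perf_env. */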
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}
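
/*
 * Only the BPF/BTF bookkeeping needs explicit initialization here; the rest
 * of the perf_env is expected to start out zero-filled.
 */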
void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}
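
/*
 * Remember the command line that started this perf session, so it can be
 * written out as part of the perf.data header features.
 */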
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}
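
/*
 * Build the per-CPU topology map (core/socket/die id for each CPU), using
 * the topology reported by the running machine when not already set.
 */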
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}
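
/* Cache the CPU identifier string from the arch-specific get_cpuid(). */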
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return -ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}
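
/*
 * Accessors that tolerate a NULL env and lazily fall back to values read
 * from the local machine when the field has not been populated yet.
 */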
const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };

		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}
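
/*
 * Map a CPU number to its NUMA node. The cpu -> node table is built lazily
 * on first use from env->numa_nodes; CPUs without a node map to -1.
 */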
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}