1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/types.h>
6 #include <linux/rbtree.h>
11 struct cpu_topology_map {
17 struct cpu_cache_level {
31 struct perf_cpu_map *map;
45 struct hybrid_cpc_node {
47 unsigned int max_branches;
61 unsigned long long total_mem;
62 unsigned int msr_pmu_type;
63 unsigned int max_branches;
69 int nr_sibling_threads;
76 int nr_hybrid_cpc_nodes;
78 const char **cmdline_argv;
81 char *sibling_threads;
84 struct cpu_topology_map *cpu;
85 struct cpu_cache_level *caches;
92 struct numa_node *numa_nodes;
93 struct memory_node *memory_nodes;
94 unsigned long long memory_bsize;
95 struct hybrid_node *hybrid_nodes;
96 struct hybrid_cpc_node *hybrid_cpc_nodes;
97 #ifdef HAVE_LIBBPF_SUPPORT
99 * bpf_info_lock protects bpf rbtrees. This is needed because the
100 * trees are accessed by different threads in perf-top
103 struct rw_semaphore lock;
104 struct rb_root infos;
109 #endif // HAVE_LIBBPF_SUPPORT
110 /* same reason as above (for perf-top) */
112 struct rw_semaphore lock;
116 /* For fast cpu to numa node lookup via perf_env__numa_node */
120 /* For real clock time reference. */
 * enabled is valid in report mode, and is true if the above
 * values are set; it is set in process_clock_data().
134 enum perf_compress_type {
/*
 * Forward declaration only; used as the payload type of the bpf
 * prog-info insert/find helpers declared below.
 */
struct bpf_prog_info_node;

/* The single global environment instance shared by the perf tools. */
extern struct perf_env perf_env;

/* Tear down @env; counterpart to perf_env__init() below. */
void perf_env__exit(struct perf_env *env);

/*
 * NOTE(review): the int-returning functions below presumably follow the
 * usual 0-on-success / negative-on-error convention — confirm against
 * the implementation before relying on specific error values.
 */
int perf_env__kernel_is_64_bit(struct perf_env *env);

/* Record the invoking command line into @env (cf. env->cmdline_argv). */
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

/*
 * Readers/accessors for cpuid and PMU-mapping data; presumably these
 * populate the matching perf_env fields on demand — verify in env.c.
 */
int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

/* Populate env->cpu (the per-cpu topology map declared above). */
int perf_env__read_cpu_topology_map(struct perf_env *env);

/* Release resources held by a single cache-level entry — see struct
 * cpu_cache_level above for what it owns. */
void cpu_cache_level__free(struct cpu_cache_level *cache);

/* Simple accessors; string lifetime/NULL-ness not documented here. */
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);

/*
 * bpf prog-info / BTF rbtree helpers. Per the comment on the rbtrees
 * above, access is protected by an rw_semaphore because perf-top
 * touches the trees from multiple threads.
 */
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);

/* Fast cpu -> numa node lookup (see the lookup-cache comment above). */
int perf_env__numa_node(struct perf_env *env, int cpu);
174 #endif /* __PERF_ENV_H */