// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/**
 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 * CPU number.
 */
static int *cpunode_map;

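/*
 * A perf_record_cpu_map_data record carries either a 32-bit or a 64-bit mask
 * layout, distinguished by long_size. Test whether bit i is set in whichever
 * layout is in use; bits beyond the stored words read as clear.
 */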
bool perf_record_cpu_map_data__test_bit(int i,
					const struct perf_record_cpu_map_data *data)
{
	int bit_word32 = i / 32;
	__u32 bit_mask32 = 1U << (i & 31);
	int bit_word64 = i / 64;
	__u64 bit_mask64 = ((__u64)1) << (i & 63);

	return (data->mask32_data.long_size == 4)
		? (bit_word32 < data->mask32_data.nr) &&
		  (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
		: (bit_word64 < data->mask64_data.nr) &&
		  (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}

/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
						    int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
	if (data->mask32_data.long_size == 4)
		bitmap[0] = data->mask32_data.mask[i];
	else
		bitmap[0] = data->mask64_data.mask[i];
#else
	if (data->mask32_data.long_size == 4) {
		bitmap[0] = data->mask32_data.mask[i];
		bitmap[1] = 0;
	} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
	}
#endif
}

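/* Build a perf_cpu_map from the explicit CPU list of a PERF_CPU_MAP__CPUS record. */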
static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(data->cpus_data.nr);
	if (map) {
		unsigned int i;

		for (i = 0; i < data->cpus_data.nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number, and we need to use (int) -1 to initialize
			 * map[i], otherwise it would become 65535.
			 */
			if (data->cpus_data.cpu[i] == (u16) -1)
				map->map[i].cpu = -1;
			else
				map->map[i].cpu = (int) data->cpus_data.cpu[i];
		}
	}

	return map;
}

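/*
 * Build a perf_cpu_map from a PERF_CPU_MAP__MASK record: count the set bits
 * to size the map, then walk the mask again to fill in each CPU number.
 */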
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
	DECLARE_BITMAP(local_copy, 64);
	int weight = 0, mask_nr = data->mask32_data.nr;
	struct perf_cpu_map *map;

	for (int i = 0; i < mask_nr; i++) {
		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		weight += bitmap_weight(local_copy, 64);
	}

	map = perf_cpu_map__empty_new(weight);
	if (!map)
		return NULL;

	for (int i = 0, j = 0; i < mask_nr; i++) {
		int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
		int cpu;

		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		for_each_set_bit(cpu, local_copy, 64)
			map->map[j++].cpu = cpu + cpus_per_i;
	}
	return map;
}

struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries(data);
	else
		return cpu_map__from_mask(data);
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

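/* Allocate a map of nr entries with every CPU initialized to the dummy value -1. */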
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i].cpu = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread - b->thread;
}

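/*
 * Build an aggregation map from cpus: get_id maps each CPU to an aggregate id,
 * duplicates are dropped, the allocation is trimmed to the unique entries and
 * the result is sorted with aggr_cpu_id__cmp.
 */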
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr);

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim. */
	if (c->nr != cpus->nr) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}
	/* ensure we process id in increasing order */
	qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}

int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy system. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to socket, so start
	 * with the socket ID and then add die to
	 * make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__die returns a struct with socket and die set. */
	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to socket and die, we need a global id.
	 * So we combine the result from aggr_cpu_id__die with the core id.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core returns a struct with socket, die and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

/* setup simple routines to easily access node numbers given a cpu number */
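/*
 * Parse a sysfs cpu/node list such as "0-7" or "0,2-5": scan backwards to the
 * last ',' or '-', read the final (highest) number and return it converted
 * from a 0-based id to a 1-based count.
 */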
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num.cpu);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num.cpu);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}

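/*
 * Populate cpunode_map by walking /sys/devices/system/node: each cpuY symlink
 * under nodeX records that CPU Y belongs to NUMA node X.
 */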
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;

		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;

			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

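/*
 * Print the map as a comma-separated cpulist, compressing consecutive CPUs
 * into ranges, e.g. a map of {0,1,2,3,7} prints as "0-3,7".
 */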
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT_MAX };
		bool last = i == map->nr;

		if (!last)
			cpu = map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i].cpu);
			}
		} else if (((i - start) != (cpu.cpu - map->map[start].cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start].cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start].cpu, map->map[end].cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

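/*
 * Print the map as a comma-separated hex mask, one nibble per four CPUs with
 * the highest CPUs first and a ',' every 32 CPUs, e.g. "ff" for CPUs 0-7.
 */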
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu last_cpu = perf_cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = perf_cpu_map__cpu(map, i).cpu;
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

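/*
 * The online map is computed once and cached; no reference is taken for the
 * caller, so treat the result as shared, read-only state.
 */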
const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}

bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread == b->thread &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}

struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}