// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/**
 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 * CPU number.
 */
static int *cpunode_map;

/* Test whether bit i is set in the CPU mask carried by a cpu_map event. */
bool perf_record_cpu_map_data__test_bit(int i,
					const struct perf_record_cpu_map_data *data)
{
	int bit_word32 = i / 32;
	__u32 bit_mask32 = 1U << (i & 31);
	int bit_word64 = i / 64;
	__u64 bit_mask64 = ((__u64)1) << (i & 63);

	return (data->mask32_data.long_size == 4)
		? (bit_word32 < data->mask32_data.nr) &&
		  (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
		: (bit_word64 < data->mask64_data.nr) &&
		  (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}
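
/*
 * Worked example (illustrative, not from the original source): for
 * i == 37 with long_size == 4, the bit lives in mask[1] of mask32_data
 * (37 / 32 == 1) at position 5 (37 & 31); with long_size == 8 it lives
 * in mask[0] of mask64_data at position 37 (37 & 63).
 */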

/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
						    int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
	if (data->mask32_data.long_size == 4)
		bitmap[0] = data->mask32_data.mask[i];
	else
		bitmap[0] = data->mask64_data.mask[i];
#else
	if (data->mask32_data.long_size == 4) {
		bitmap[0] = data->mask32_data.mask[i];
		bitmap[1] = 0;
	} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
	}
#endif
}
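
/*
 * Note (editorial assumption, not from the original source): on 32-bit
 * hosts each 64-bit mask word is split across bitmap[0] and bitmap[1],
 * ordered so the two unsigned longs match how a __u64 is laid out on
 * that endianness; for_each_set_bit() then sees the same bits either way.
 */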

static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(data->cpus_data.nr);
	if (map) {
		unsigned int i;

		for (i = 0; i < data->cpus_data.nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number, and we need to use (int) -1 to initialize
			 * map[i], otherwise it would become 65535.
			 */
			if (data->cpus_data.cpu[i] == (u16) -1)
				RC_CHK_ACCESS(map)->map[i].cpu = -1;
			else
				RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i];
		}
	}

	return map;
}

static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
	DECLARE_BITMAP(local_copy, 64);
	int weight = 0, mask_nr = data->mask32_data.nr;
	struct perf_cpu_map *map;

	/* Count the set bits first so the map can be sized exactly. */
	for (int i = 0; i < mask_nr; i++) {
		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		weight += bitmap_weight(local_copy, 64);
	}

	map = perf_cpu_map__empty_new(weight);
	if (!map)
		return NULL;

	for (int i = 0, j = 0; i < mask_nr; i++) {
		int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
		int cpu;

		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		for_each_set_bit(cpu, local_copy, 64)
			RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
	}
	return map;
}

static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;
	unsigned int i = 0;

	map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
				data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
	if (!map)
		return NULL;

	if (data->range_cpu_data.any_cpu)
		RC_CHK_ACCESS(map)->map[i++].cpu = -1;

	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
	     i++, cpu++)
		RC_CHK_ACCESS(map)->map[i].cpu = cpu;

	return map;
}

struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		return cpu_map__from_entries(data);
	case PERF_CPU_MAP__MASK:
		return cpu_map__from_mask(data);
	case PERF_CPU_MAP__RANGE_CPUS:
		return cpu_map__from_range(data);
	default:
		pr_err("cpu_map__new_data unknown type %d\n", data->type);
		return NULL;
	}
}
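
/*
 * Example usage (illustrative sketch; `event` stands for a hypothetical
 * PERF_RECORD_CPU_MAP record, not something defined in this file):
 *
 *	struct perf_cpu_map *map = cpu_map__new_data(&event->cpu_map.data);
 *
 *	if (map) {
 *		cpu_map__fprintf(map, stdout);
 *		perf_cpu_map__put(map);
 *	}
 */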

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr);

	if (cpus != NULL) {
		for (int i = 0; i < nr; i++)
			RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
	}

	return cpus;
}

struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}
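
/*
 * For example, cpu__get_topology_int(2, "core_id", &value) reads
 * /sys/devices/system/cpu/cpu2/topology/core_id; sysfs__read_int()
 * resolves the path relative to the sysfs mount point.
 */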

int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

/* Compare aggregate IDs field by field, from the most to the least general. */
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->cluster != b->cluster)
		return a->cluster - b->cluster;
	else if (a->cache_lvl != b->cache_lvl)
		return a->cache_lvl - b->cache_lvl;
	else if (a->cache != b->cache)
		return a->cache - b->cache;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread_idx - b->thread_idx;
}

struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data, bool needs_sort)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim the allocation if duplicates were dropped. */
	if (c->nr != perf_cpu_map__nr(cpus)) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}

	/* ensure we process id in increasing order */
	if (needs_sort)
		qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}
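
/*
 * Example usage (illustrative): build one aggregate entry per socket,
 * sorted in increasing ID order:
 *
 *	struct cpu_aggr_map *sockets =
 *		cpu_aggr_map__new(cpu_map__online(), aggr_cpu_id__socket,
 *				  NULL, true);
 */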

int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to socket, so start
	 * with the socket ID and then add die to
	 * make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_cluster_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
{
	int cluster = cpu__get_cluster_id(cpu);
	struct aggr_cpu_id id;

	/* There is no cluster_id on legacy systems. */
	if (cluster == -1)
		cluster = 0;

	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cluster = cluster;
	return id;
}

int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__cluster returns a struct with socket, die and cluster set. */
	id = aggr_cpu_id__cluster(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to socket and die, so we need a global id.
	 * Combine the socket/die/cluster fields set above with the core id.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core returns a struct with socket, die and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* it always aggregates to cpu 0 */
	cpu.cpu = 0;
	id.cpu = cpu;
	return id;
}

/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}
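
/*
 * Worked example (illustrative): if the file contains "0-3", the scan
 * from the right stops just past the '-', sscanf() parses 3, and *max
 * becomes 4, i.e. a count rather than the highest index.
 */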

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num.cpu);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num.cpu);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}

int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
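
/*
 * Illustrative effect: on a system where /sys/devices/system/node/node1
 * contains a cpu5 symlink, this walk sets cpunode_map[5] = 1, letting
 * cpu__get_node() answer lookups in O(1).
 */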

size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT_MAX };
		bool last = i == perf_cpu_map__nr(map);

		if (!last)
			cpu = perf_cpu_map__cpu(map, i);

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, i).cpu);
			}
		} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}
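
/*
 * Example output (illustrative): a map holding CPUs {0, 1, 2, 3, 5} is
 * rendered as "0-3,5"; runs of consecutive CPUs collapse into ranges.
 */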

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu last_cpu = perf_cpu_map__cpu(map, perf_cpu_map__nr(map) - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < perf_cpu_map__nr(map); i++) {
		cpu = perf_cpu_map__cpu(map, i).cpu;
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}
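
/*
 * Example output (illustrative): CPUs {0, 1, 2, 3} print as "f", while
 * CPUs {0, 32} print as "1,00000001" (a comma every 32 bits, highest
 * nibble first).
 */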

struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static struct perf_cpu_map *online;

	if (!online)
		online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */

	return online;
}

bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread_idx == b->thread_idx &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->cluster == b->cluster &&
		a->cache_lvl == b->cache_lvl &&
		a->cache == b->cache &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread_idx == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->cluster == -1 &&
		a->cache_lvl == -1 &&
		a->cache == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}

struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread_idx = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.cluster = -1,
		.cache_lvl = -1,
		.cache = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}