// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
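
/*
 * Default load-latency threshold (in cycles) substituted into the
 * "ldlat-loads" event string below; callers may change it before the event
 * name is first formatted.
 */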
unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",  "cpu/mem-loads,ldlat=%u/P", "mem-loads"),
	E("ldlat-stores", "cpu/mem-stores/P",         "mem-stores"),
};
#undef E

static char mem_loads_name[100];
static bool mem_loads_name__init;
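
/*
 * Resolve the event string for entry @i.  For the load event the "%u" in the
 * template is filled in with perf_mem_events__loads_ldlat exactly once and
 * cached in mem_loads_name.  Declared __weak so an architecture can override
 * this implementation.
 */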
char * __weak perf_mem_events__name(int i)
{
	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  perf_mem_events[i].name,
				  perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return (char *)perf_mem_events[i].name;
}
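
/*
 * Parse a comma-separated list of event tags (e.g. "ldlat-loads") and mark
 * every matching entry in perf_mem_events[] for recording.  Returns 0 if at
 * least one tag matched, a negative value otherwise.
 */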
int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r((char *)buf, ",", &saveptr);
	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = &perf_mem_events[j];

			if (strstr(e->tag, tok))
				e->record = found = true;
		}
		tok = strtok_r(NULL, ",", &saveptr);
	}
	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}
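
/*
 * Probe sysfs ($sysfs/devices/cpu/events/<name>) to find out which of the
 * memory events the running CPU's PMU exposes, and mark those entries as
 * supported.  Returns -ENOENT when none are available.
 */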
int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		char path[PATH_MAX];
		struct perf_mem_event *e = &perf_mem_events[j];
		struct stat st;

		scnprintf(path, PATH_MAX, "%s/devices/cpu/events/%s",
			  mnt, e->sysfs_name);

		if (!stat(path, &st))
			e->supported = found = true;
	}

	return found ? 0 : -ENOENT;
}
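
/*
 * Decode strings for the data-source dTLB bits; the array is indexed by bit
 * position, so the order must match the PERF_MEM_TLB_* definitions.
 */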
static const char * const tlb_access[] = {
	"N/A", "HIT", "MISS", "L1", "L2", "Walker", "Fault",
};
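
/*
 * Render the dTLB portion of data_src into @out, e.g. "L1 or L2 hit";
 * prints "N/A" when no TLB information is available.
 */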
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}
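
/*
 * Decode strings for the memory-level bits, again indexed by bit position to
 * match the PERF_MEM_LVL_* definitions.
 */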
static const char * const mem_lvl[] = {
	"N/A", "HIT", "MISS", "L1", "LFB", "L2", "L3", "Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O", "Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};
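
/*
 * Format the memory hierarchy level of a sample into @out (e.g. "L1 hit" or
 * "Remote RAM (1 hop) hit").  When the sample carries the mem_lvl_num
 * encoding, that is printed as well, using the mem_lvlnum[] table above.
 */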
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;
	int printed = 0;

	if (mem_info)
		m = mem_info->data_src.mem_lvl;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	if (mem_info && mem_info->data_src.mem_remote) {
		strcat(out, "Remote ");
		l += 7;
	}

	for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (mem_info && mem_info->data_src.mem_lvl_num) {
		int lvl = mem_info->data_src.mem_lvl_num;

		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);
	}

	if (l == 0)
		l += scnprintf(out + l, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}
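
/*
 * Decode strings for the snoop result bits, indexed by bit position to match
 * the PERF_MEM_SNOOP_* definitions.
 */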
static const char * const snoop_access[] = {
	"N/A", "None", "Hit", "Miss", "HitM",
};
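
/*
 * Render the snoop portion of data_src ("None", "Hit", "HitM", "Fwd", ...),
 * falling back to "N/A" when nothing is set.
 */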
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	if (mem_info &&
	    (mem_info->data_src.mem_snoopx & PERF_MEM_SNOOPX_FWD)) {
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "Fwd");
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}
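
/*
 * Report whether the sampled access was locked: "Yes", "No", or "N/A" when
 * the information is not available.
 */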
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}
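
/*
 * Build the combined "<level>|SNP <snoop>|TLB <tlb>|LCK <lock>" string shown
 * by perf script, for example "L1 hit|SNP None|TLB L1 or L2 hit|LCK No".
 */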
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += perf_mem__lvl_scnprintf(out, sz, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);

	return i;
}
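
/*
 * Decode one sample's data_src into c2c_stats counters: classify the access
 * as load or store, bump the per-level hit/miss counters and account local
 * and remote HITMs.  Returns -1 when the sample cannot be used (no data
 * address, unresolved maps or an unparsable data_src).
 */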
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op    = data_src->mem_op;
	u64 lvl   = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 lock  = data_src->mem_lock;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 */
	bool mrem = data_src->mem_remote;
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT))
				stats->rmt_hit++;
			else if (snoop & P(SNOOP, HITM))
				HITM_INC(rmt_hitm);
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
	} else {
		/* unparsable data_src? */
		return -1;
	}

	if (!mi->daddr.map || !mi->iaddr.map) {
		stats->nomap++;
		return -1;
	}

	return err;
}
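
/*
 * Accumulate @add into @stats field by field, so callers can fold per-entry
 * statistics into an aggregate.
 */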
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks      += add->locks;
	stats->store      += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs  += add->st_noadrs;
	stats->st_l1hit   += add->st_l1hit;
	stats->st_l1miss  += add->st_l1miss;
	stats->load       += add->load;
	stats->ld_excl    += add->ld_excl;
	stats->ld_shared  += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io      += add->ld_io;
	stats->ld_miss    += add->ld_miss;
	stats->ld_noadrs  += add->ld_noadrs;
	stats->ld_fbhit   += add->ld_fbhit;
	stats->ld_l1hit   += add->ld_l1hit;
	stats->ld_l2hit   += add->ld_l2hit;
	stats->ld_llchit  += add->ld_llchit;
	stats->lcl_hitm   += add->lcl_hitm;
	stats->rmt_hitm   += add->rmt_hitm;
	stats->tot_hitm   += add->tot_hitm;
	stats->rmt_hit    += add->rmt_hit;
	stats->lcl_dram   += add->lcl_dram;
	stats->rmt_dram   += add->rmt_dram;
	stats->nomap      += add->nomap;
	stats->noparse    += add->noparse;
}