// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX
/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
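/*
 * For illustration (hypothetical 2-socket box, CPUs 0-3 on socket 0 and
 * CPUs 4-7 on socket 1): under AGGR_SOCKET the counts measured on CPUs
 * 4-7 all land in the slot of the socket's first CPU, so the print pass
 * for socket 1 reads e.g. runtime_cycles_stats[ctx][4].
 */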
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
static struct rblist runtime_saved_values;
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;
struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	int cpu;
	struct stats stats;
};
static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}
static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create)
{
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
	};
	nd = rblist__find(&runtime_saved_values, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(&runtime_saved_values, &dm);
		nd = rblist__find(&runtime_saved_values, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}
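/*
 * Minimal usage sketch (hypothetical caller, any evsel 'counter'):
 *
 *	struct saved_value *v = saved_value_lookup(counter, cpu, true);
 *
 *	if (v)
 *		update_stats(&v->stats, count);
 *
 * With create == false the lookup returns NULL when no node exists yet,
 * which generic_metric() below relies on to detect missing input events.
 */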
void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	rblist__init(&runtime_saved_values);
	runtime_saved_values.node_cmp = saved_value_cmp;
	runtime_saved_values.node_new = saved_value_new;
	/* No delete for now */
}
static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}
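/*
 * Example: an event opened with attr.exclude_kernel = 1 and
 * attr.exclude_hv = 1 gets ctx == (CTX_BIT_KERNEL | CTX_BIT_HV), so its
 * shadow stats never mix with those of a differently filtered event.
 */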
void perf_stat__reset_shadow_stats(void)
{
	struct rb_node *pos, *next;

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0, sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0, sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
	memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
	memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
	memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
	memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
	memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
	memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));

	next = rb_first(&runtime_saved_values.entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
				    int cpu)
{
	int ctx = evsel_context(counter);

	count *= counter->scale;

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_stats(&runtime_smi_num_stats[ctx][cpu], count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_stats(&runtime_aperf_stats[ctx][cpu], count);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true);
		update_stats(&v->stats, count);
	}
}
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
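/*
 * Example: a cache miss ratio of 12.0 exceeds the GRC_CACHE_MISSES
 * threshold of 10.0 but not 20.0, so it prints in magenta; above 20.0
 * it turns red, and between 5.0 and 10.0 it prints yellow.
 */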
static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name))
			return c2;
	}
	return NULL;
}
/* Mark MetricExpr target events, and link the events that use them to those targets. */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(sizeof(struct perf_evsel *),
					       num_metric_names + 1);
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i])) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;

		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}
static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}
static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}
static void print_branch_misses(int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_branches_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}
/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance Analysis and Counter Architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions).
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory.
 * Retiring is good execution that is not directly bottlenecked.
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline for each unit of pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle).
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *		    TotalSlots
 * Retiring	  = SlotsRetired / TotalSlots
 * FrontendBound  = FetchBubbles / TotalSlots
 * BackendBound	  = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as far as
 * possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
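/*
 * Worked example with made-up numbers for a hypothetical 4-wide CPU
 * over 1000 cycles (TotalSlots = 4000), with SlotsIssued = 3000,
 * SlotsRetired = 2500, RecoveryBubbles = 300 and FetchBubbles = 500:
 *
 * BadSpeculation = ((3000 - 2500) + 300) / 4000 = 20.0%
 * Retiring	  = 2500 / 4000			 = 62.5%
 * FrontendBound  = 500 / 4000			 = 12.5%
 * BackendBound	  = 1.0 - 0.20 - 0.625 - 0.125	 =  5.0%
 */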
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}
static double td_total_slots(int ctx, int cpu)
{
	return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
}
static double td_bad_spec(int ctx, int cpu)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
		avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
		avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
	total_slots = td_total_slots(ctx, cpu);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}
static double td_retiring(int ctx, int cpu)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}
static double td_fe_bound(int ctx, int cpu)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}
static double td_be_bound(int ctx, int cpu)
{
	double sum = (td_fe_bound(ctx, cpu) +
		      td_bad_spec(ctx, cpu) +
		      td_retiring(ctx, cpu));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}
static void print_smi_cost(int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
	aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
	cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
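/*
 * Worked example with made-up numbers: aperf = 1100, cycles = 1000 and a
 * non-zero SMI count give cost = (1100 - 1000) / 1100 * 100 ~= 9.1%,
 * i.e. about 9% of the APERF-counted cycles are attributed to SMI handling.
 */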
static void generic_metric(const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		double scale;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;
		}
		expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats) * scale);
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(ctxp, NULL, "%8.1f",
				     metric_name ?
				     metric_name :
				     out->force_header ? name : "",
				     ratio);
		else
			print_metric(ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(ctxp, NULL, NULL, "", 0);
}
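/*
 * For illustration: with a (hypothetical) metric_expr of
 * "cache_misses / instructions", both ids are added to pctx with their
 * averaged counts and expr__parse() evaluates the division into 'ratio'.
 * "duration_time" is special-cased to walltime_nsecs_stats, scaled by
 * 1e-9 so the expression sees seconds rather than nanoseconds.
 */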
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;
	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}
		total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

		if (total && avg) {
			out->new_line(ctxp);
			ratio = total / avg;
			print_metric(ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		} else if (have_frontend_stalled) {
			print_metric(ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_branches_stats[ctx][cpu].n != 0)
			print_branch_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
			print_l1_dcache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_icache_stats[ctx][cpu].n != 0)
			print_l1_icache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
			print_dtlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
			print_itlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == (PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_ll_cache_stats[ctx][cpu].n != 0)
			print_ll_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_cacherefs_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total)
			print_metric(ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2 - avg) / total));
		else
			print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / ratio);
		else
			print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu) > 0)
			print_metric(ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
			       evsel->metric_name, avg, cpu, out);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(cpu, evsel, out);
	} else {
		num = 0;
	}
	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(ctxp);
			generic_metric(mexp->metric_expr, mexp->metric_events,
				       evsel->name, mexp->metric_name,
				       avg, cpu, out);
		}
	}

	if (num == 0)
		print_metric(ctxp, NULL, NULL, NULL, 0);
}
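/*
 * Minimal caller sketch (hypothetical; the callback shapes follow the
 * print_metric/new_line calls used above):
 *
 *	static void print_cb(void *ctx, const char *color, const char *fmt,
 *			     const char *unit, double val)
 *	{
 *		if (fmt) {
 *			fprintf((FILE *)ctx, " # ");
 *			fprintf((FILE *)ctx, fmt, val);
 *			fprintf((FILE *)ctx, " %s\n", unit);
 *		}
 *	}
 *
 *	static void new_line_cb(void *ctx)
 *	{
 *		fputs("\n", (FILE *)ctx);
 *	}
 *
 *	struct perf_stat_output_ctx out = {
 *		.ctx		= stdout,
 *		.print_metric	= print_cb,
 *		.new_line	= new_line_cb,
 *	};
 *
 *	perf_stat__print_shadow_stats(evsel, avg, cpu, &out, NULL);
 */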