// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include <linux/zalloc.h>
/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
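/*
 * Illustrative note (added, not from the original source): with
 * AGGR_SOCKET, both sides of a derived ratio such as IPC are saved and
 * looked up under the first CPU of the socket, e.g.:
 *
 *	perf_stat__update_shadow_stats(cycles_evsel, val, first_cpu, &rt_stat);
 *	perf_stat__update_shadow_stats(instr_evsel, val2, first_cpu, &rt_stat);
 *
 * so the two counts land in matching saved_value slots and can be
 * combined when printing.
 */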
struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct saved_value {
	struct rb_node rb_node;
	struct evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu;
	struct cgroup *cgrp;
	struct runtime_stat *stat;
	struct stats stats;
	u64 metric_total;
	int metric_other;
};
static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. For shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
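	/*
	 * Hypothetical example (added for illustration): the shadow-stat
	 * key (evsel=NULL, cpu=2, type=STAT_CYCLES, ctx=0) and a generic
	 * metric key (evsel=some_evsel, cpu=2, type=0, ctx=0) sort into
	 * distinct tree positions because every field below takes part
	 * in the comparison.
	 */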
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->cgrp != b->cgrp)
		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;
		else
			return +1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	else
		return +1;
}
static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}
static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}
static struct saved_value *saved_value_lookup(struct evsel *evsel,
					      int cpu,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st,
					      struct cgroup *cgrp)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
		.cgrp = cgrp,
	};

	rblist = &st->value_list;

	/* don't use context info for clock events */
	if (type == STAT_NSECS)
		dm.ctx = 0;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}
void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}
void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}
void perf_stat__init_shadow_stats(void)
{
	runtime_stat__init(&rt_stat);
}
static int evsel_context(struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}
static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first_cached(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}
void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}
struct runtime_stat_data {
	int ctx;
	struct cgroup *cgrp;
};
static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int cpu, u64 count,
				struct runtime_stat_data *rsd)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu, true, type,
						   rsd->ctx, st, rsd->cgrp);

	if (v)
		update_stats(&v->stats, count);
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
				    int cpu, struct runtime_stat *st)
{
	u64 count_ns = count;
	struct saved_value *v;
	struct runtime_stat_data rsd = {
		.ctx = evsel_context(counter),
		.cgrp = counter->cgrp,
	};

	count *= counter->scale;

	if (evsel__is_clock(counter))
		update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
		update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
		update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
				    cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
				    cpu, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    cpu, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    cpu, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd);
	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, cpu, count, &rsd);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, cpu, count, &rsd);

	if (counter->collect_stat) {
		v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st,
				       rsd.cgrp);
		update_stats(&v->stats, count);
		if (counter->metric_leader)
			v->metric_total += count;
	} else if (counter->metric_leader) {
		v = saved_value_lookup(counter->metric_leader,
				       cpu, true, STAT_NONE, 0, st, rsd.cgrp);
		v->metric_total += count;
		v->metric_other++;
	}
}
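/*
 * Usage sketch (illustrative, not part of the original file): after
 * reading a counter for one CPU, feed the raw value into the shadow
 * machinery so later print calls can derive ratios such as IPC:
 *
 *	struct perf_counts_values *vals = perf_counts(counter->counts, cpu, 0);
 *
 *	perf_stat__update_shadow_stats(counter, vals->val, cpu, &rt_stat);
 */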
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
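/*
 * Example (illustrative): a 35% frontend-stall ratio falls between the
 * 30.0 and 50.0 thresholds above, so
 *
 *	get_ratio_color(GRC_STALLED_CYCLES_FE, 35.0)
 *
 * returns PERF_COLOR_MAGENTA, while anything above 50% turns red.
 */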
static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
					   const char *name)
{
	struct evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
			return c2;
	}
	return NULL;
}
/* Mark MetricExpr target events and link events using them to them. */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
	struct evsel *counter, *leader, **metric_events, *oc;
	bool found;
	struct expr_parse_ctx ctx;
	struct hashmap_entry *cur;
	size_t bkt;
	int i;

	expr__ctx_init(&ctx);
	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;

		expr__ctx_clear(&ctx);
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr,
					     counter->name,
					     &ctx, 1) < 0)
				continue;

			metric_events = calloc(sizeof(struct evsel *),
					       hashmap__size(&ctx.ids) + 1);
			if (!metric_events) {
				expr__ctx_clear(&ctx);
				return;
			}
			counter->metric_events = metric_events;
		}

		i = 0;
		hashmap__for_each_entry((&ctx.ids), cur, bkt) {
			const char *metric_name = (const char *)cur->key;

			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name,
							metric_name) &&
					    !oc->collect_stat) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list,
							   metric_name);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed ||
				    strcasecmp(printed, metric_name)) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_name,
						counter->name);
					printed = strdup(metric_name);
				}
				invalid = true;
				continue;
			}
			metric_events[i++] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
	expr__ctx_clear(&ctx);
}
static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int cpu,
			       struct runtime_stat_data *rsd)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}
static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int cpu,
			     struct runtime_stat_data *rsd)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
	if (!v)
		return 0.0;

	return v->stats.n;
}
static void print_stalled_cycles_frontend(struct perf_stat_config *config,
					  int cpu, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st,
					  struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}
static void print_stalled_cycles_backend(struct perf_stat_config *config,
					 int cpu, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st,
					 struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}
static void print_branch_misses(struct perf_stat_config *config,
				int cpu, double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st,
				struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}
static void print_l1_dcache_misses(struct perf_stat_config *config,
				   int cpu, double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st,
				   struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
}
static void print_l1_icache_misses(struct perf_stat_config *config,
				   int cpu, double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st,
				   struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
}
static void print_dtlb_cache_misses(struct perf_stat_config *config,
				    int cpu, double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st,
				    struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
}
static void print_itlb_cache_misses(struct perf_stat_config *config,
				    int cpu, double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st,
				    struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
}
static void print_ll_cache_misses(struct perf_stat_config *config,
				  int cpu, double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st,
				  struct runtime_stat_data *rsd)
{
	double total, ratio = 0.0;
	const char *color;

	total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
}
/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline each for the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
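/*
 * Worked example (hypothetical numbers, added for illustration): on a
 * 4-wide pipeline measuring 1000 cycles, TotalSlots = 4000. With
 * SlotsIssued = 3000, SlotsRetired = 2000, RecoveryBubbles = 400 and
 * FetchBubbles = 400:
 *
 *	BadSpeculation = ((3000 - 2000) + 400) / 4000 = 0.35
 *	Retiring       = 2000 / 4000                  = 0.50
 *	FrontendBound  =  400 / 4000                  = 0.10
 *	BackendBound   = 1.0 - 0.35 - 0.50 - 0.10     = 0.05
 *
 * so this hypothetical workload is dominated by bad speculation and
 * would be colored red by the thresholds used further below.
 */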
/*
 * Counts can come out slightly negative from rounding and multiplexing
 * estimation errors; treat values within 2% below zero as zero.
 */
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}
static double td_total_slots(int cpu, struct runtime_stat *st,
			     struct runtime_stat_data *rsd)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd);
}
static double td_bad_spec(int cpu, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd);

	total_slots = td_total_slots(cpu, st, rsd);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}
static double td_retiring(int cpu, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double retiring = 0;
	double total_slots = td_total_slots(cpu, st, rsd);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    cpu, rsd);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}
static double td_fe_bound(int cpu, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(cpu, st, rsd);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    cpu, rsd);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}
static double td_be_bound(int cpu, struct runtime_stat *st,
			  struct runtime_stat_data *rsd)
{
	double sum = (td_fe_bound(cpu, st, rsd) +
		      td_bad_spec(cpu, st, rsd) +
		      td_retiring(cpu, st, rsd));

	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}
/*
 * Kernel reports metrics multiplied by slots. To get back
 * the ratios we need to recreate the sum.
 */
static double td_metric_ratio(int cpu, enum stat_type type,
			      struct runtime_stat *stat,
			      struct runtime_stat_data *rsd)
{
	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd);
	double d = runtime_stat_avg(stat, type, cpu, rsd);

	if (sum)
		return d / sum;
	return 0;
}
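/*
 * Numeric example (hypothetical): if the kernel reports
 * retiring = 500, fe_bound = 200, be_bound = 250 and bad_spec = 50
 * (all in slot units), the sum is 1000 and
 * td_metric_ratio(cpu, STAT_TOPDOWN_RETIRING, ...) yields 500 / 1000 = 0.5.
 */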
/*
 * ... but only if most of the values are actually available.
 * We allow two missing.
 */
static bool full_td(int cpu, struct runtime_stat *stat,
		    struct runtime_stat_data *rsd)
{
	int c = 0;

	if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0)
		c++;
	return c >= 2;
}
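/*
 * For example (illustrative): if only retiring and be_bound are nonzero,
 * c == 2 and the breakdown is still printed; with just one nonzero value
 * the recreated sum would be meaningless, so full_td() returns false.
 */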
static void print_smi_cost(struct perf_stat_config *config, int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st,
			   struct runtime_stat_data *rsd)
{
	double smi_num, aperf, cycles, cost = 0.0;
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd);
	aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd);
	cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
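/*
 * Illustrative numbers (added): with aperf = 1000 and cycles = 900 in the
 * same scaled units, cost = (1000 - 900) / 1000 * 100 = 10%, i.e. one
 * tenth of the reference cycles were spent in SMI handlers; costs above
 * 10% are highlighted in red.
 */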
static int prepare_metric(struct evsel **metric_events,
			  struct metric_ref *metric_refs,
			  struct expr_parse_ctx *pctx,
			  int cpu,
			  struct runtime_stat *st)
{
	double scale;
	char *n, *pn;
	int i, j, ret;

	expr__ctx_init(pctx);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		u64 metric_total = 0;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false,
					       STAT_NONE, 0, st,
					       metric_events[i]->cgrp);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;

			if (v->metric_other)
				metric_total = v->metric_total;
		}

		n = strdup(metric_events[i]->name);
		if (!n)
			return -ENOMEM;
		/*
		 * This display code with --no-merge adds [cpu] postfixes.
		 * These are not supported by the parser. Remove everything
		 * after the space.
		 */
		pn = strchr(n, ' ');
		if (pn)
			*pn = 0;

		if (metric_total)
			expr__add_id_val(pctx, n, metric_total);
		else
			expr__add_id_val(pctx, n, avg_stats(stats) * scale);
	}

	for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
		ret = expr__add_ref(pctx, &metric_refs[j]);
		if (ret)
			return ret;
	}

	return i;
}
static void generic_metric(struct perf_stat_config *config,
			   const char *metric_expr,
			   struct evsel **metric_events,
			   struct metric_ref *metric_refs,
			   char *name,
			   const char *metric_name,
			   const char *metric_unit,
			   int runtime,
			   int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct expr_parse_ctx pctx;
	double ratio, scale;
	int i;
	void *ctxp = out->ctx;

	i = prepare_metric(metric_events, metric_refs, &pctx, cpu, st);
	if (i < 0)
		return;

	if (!metric_events[i]) {
		if (expr__parse(&ratio, &pctx, metric_expr, runtime) == 0) {
			char *unit;
			char metric_bf[64];

			if (metric_unit && metric_name) {
				if (perf_pmu__convert_scale(metric_unit,
					&unit, &scale) >= 0) {
					ratio *= scale;
				}
				if (strstr(metric_expr, "?"))
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s_%d", unit, metric_name, runtime);
				else
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s", unit, metric_name);

				print_metric(config, ctxp, NULL, "%8.1f",
					     metric_bf, ratio);
			} else {
				print_metric(config, ctxp, NULL, "%8.2f",
					metric_name ?
					metric_name :
					out->force_header ? name : "",
					ratio);
			}
		} else {
			print_metric(config, ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
		}
	} else {
		print_metric(config, ctxp, NULL, NULL,
			     out->force_header ?
			     (metric_name ? metric_name : name) : "", 0);
	}

	expr__ctx_clear(&pctx);
}
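/*
 * Flow sketch (added for illustration): for a metric defined as, say,
 * "instructions / cycles", prepare_metric() adds the averaged count of
 * each named event to the expression context, expr__parse() evaluates
 * the expression string against those values, and the result is printed
 * with the metric's name and unit.
 */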
double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
{
	struct expr_parse_ctx pctx;
	double ratio = 0.0;

	if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
		goto out;

	if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
		ratio = 0.0;

out:
	expr__ctx_clear(&pctx);
	return ratio;
}
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	struct runtime_stat_data rsd = {
		.ctx = evsel_context(evsel),
		.cgrp = evsel->cgrp,
	};
	struct metric_event *me;
	int num = 1;
	if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    cpu, &rsd));

		if (total && avg) {
			out->new_line(config, ctxp);
			ratio = total / avg;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		}
	} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0)
			print_branch_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0)
			print_l1_dcache_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0)
			print_l1_icache_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0)
			print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0)
			print_itlb_cache_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0)
			print_ll_cache_misses(config, cpu, avg, out, st, &rsd);
		else
			print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0)
			print_metric(config, ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd);
	} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);

		if (total)
			print_metric(config, ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2-avg) / total));
		else
			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0)
			print_metric(config, ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

		if (avg)
			ratio = total / avg;

		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (evsel__is_clock(evsel)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / (ratio * evsel->scale));
		else
			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(cpu, st, &rsd);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(cpu, st, &rsd);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(cpu, st, &rsd);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(cpu, st, &rsd);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(cpu, st, &rsd) > 0)
			print_metric(config, ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(config, ctxp, NULL, NULL, name, 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
		   full_td(cpu, st, &rsd)) {
		double retiring = td_metric_ratio(cpu,
						  STAT_TOPDOWN_RETIRING, st,
						  &rsd);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
		   full_td(cpu, st, &rsd)) {
		double fe_bound = td_metric_ratio(cpu,
						  STAT_TOPDOWN_FE_BOUND, st,
						  &rsd);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
		   full_td(cpu, st, &rsd)) {
		double be_bound = td_metric_ratio(cpu,
						  STAT_TOPDOWN_BE_BOUND, st,
						  &rsd);

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
			     be_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
		   full_td(cpu, st, &rsd)) {
		double bad_spec = td_metric_ratio(cpu,
						  STAT_TOPDOWN_BAD_SPEC, st,
						  &rsd);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (evsel->metric_expr) {
		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
			       evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(config, cpu, out, st, &rsd);
	} else {
		num = 0;
	}
	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(config, ctxp);
			generic_metric(config, mexp->metric_expr, mexp->metric_events,
				       mexp->metric_refs, evsel->name, mexp->metric_name,
				       mexp->metric_unit, mexp->runtime, cpu, out, st);
		}
	}

	if (num == 0)
		print_metric(config, ctxp, NULL, NULL, NULL, 0);
}