// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}
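/*
 * Worked example: after feeding the samples 10, 20 and 30 through
 * update_stats(), the running state is n = 3, mean = 20 and
 * M2 = (10-20)^2 + (20-20)^2 + (30-20)^2 = 200. mean and M2 are
 * maintained incrementally (Welford's algorithm), so no buffer of
 * past samples is needed.
 */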
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
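/*
 * Continuing the example above: variance = 200 / (3 - 1) = 100 and
 * variance_mean = 100 / 3, so stddev_stats() returns
 * sqrt(100 / 3) ~= 5.77, the std dev of the mean.
 */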
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
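/*
 * With the numbers above, rel_stddev_stats(5.77, 20) ~= 28.87, which is
 * the kind of "( +- 28.87% )" noise figure printed by 'perf stat -r'
 * for repeated runs.
 */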
bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID
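/*
 * The ID() macro stringifies the event name, so for example
 * ID(SMI_NUM, msr/smi/) expands to
 * [PERF_STAT_EVSEL_ID__SMI_NUM] = "msr/smi/".
 */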
static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}
static int evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	evsel__reset_stat_priv(evsel);
	return 0;
}
static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}
static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}
static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	if (evsel__alloc_stat_priv(evsel) < 0 ||
	    evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}
int evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}
void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}
void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}
void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int ncpus = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		for (int cpu = 0; cpu < ncpus; cpu++) {
			*perf_counts(evsel->counts, cpu, thread) =
				*perf_counts(evsel->prev_raw_counts, cpu,
					     thread);
		}
	}

	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}
void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}
void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	/*
	 * To collect the overall statistics for interval mode,
	 * we copy the counts from evsel->prev_raw_counts to
	 * evsel->counts. The perf_stat_process_counter creates
	 * aggr values from per cpu values, but the per cpu values
	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
	 * previous aggr value to the first member of perf_counts,
	 * then the aggr calculation in process_counter_values can
	 * work correctly.
	 */
	evlist__for_each_entry(evlist, evsel) {
		*perf_counts(evsel->prev_raw_counts, 0, 0) =
			evsel->prev_raw_counts->aggr;
	}
}
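/*
 * Illustration of the trick (condensed reading of the comment above):
 * with AGGR_GLOBAL the per-cpu slots of prev_raw_counts stay at 0 and
 * only ->aggr carries the running total, so stashing that total into
 * slot (cpu 0, thread 0) lets the generic per-cpu summation in
 * process_counter_values() reproduce the overall count.
 */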
static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(const void *__key1, const void *__key2,
			 void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}
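/*
 * Example: a CPU on die 1, socket 0 is keyed as (1ULL << 32 | 0), as
 * built in check_per_pkg() below. pkg_id_hash() buckets on the low
 * 32 bits (the socket id) while pkg_id_equal() still compares the full
 * (die, socket) pair.
 */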
static int check_per_pkg(struct evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL).socket;
	if (s < 0)
		return -1;

	/*
	 * On multi-die system, die_id > 0. On no-die system, die_id = 0.
	 * We use hashmap(socket, die) to check the used socket+die pair.
	 */
	d = cpu_map__get_die(cpus, cpu, NULL).die;
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, (void *)key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, (void *)key, (void *)1);

	return ret;
}
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NODE:
	case AGGR_NONE:
		if (!evsel->snapshot)
			evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
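/*
 * Note on scaling: perf_counts_values__scale() (libperf) compensates
 * for multiplexing. When an event ran for only part of its enabled
 * time it extrapolates the value as val * ena / run, and a run time of
 * 0 marks the count as not having been counted at all.
 */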
static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, so enable the
	 * group read (for the leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
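	/*
	 * With PERF_FORMAT_GROUP|PERF_FORMAT_ID added to the
	 * TOTAL_TIME_* flags, a read() of the group leader returns,
	 * per perf_event_open(2):
	 *
	 *	struct read_format {
	 *		u64 nr;			// events in the group
	 *		u64 time_enabled;
	 *		u64 time_running;
	 *		struct { u64 value; u64 id; } values[nr];
	 *	};
	 */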
	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec later on.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}