1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2017, Intel Corporation.
6 /* Manage metrics and groups of metrics from JSON files */
8 #include "metricgroup.h"
20 #include <linux/ctype.h>
21 #include <linux/string.h>
22 #include <linux/zalloc.h>
23 #include <subcmd/parse-options.h>
24 #include <api/fs/fs.h>
/*
 * Look up the metric_event keyed by an evsel in the rblist; on a miss the
 * key is inserted and looked up again so a stable node pointer is returned.
 * NOTE(review): excerpt is elided — the local declarations and the
 * miss/"create" branch between these lines are not visible here.
 */
29 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
34 struct metric_event me = {
41 nd = rblist__find(metric_events, &me);
43 return container_of(nd, struct metric_event, nd);
/* Not found: insert a copy of the key, then re-find to get the node. */
45 rblist__add_node(metric_events, &me);
46 nd = rblist__find(metric_events, &me);
48 return container_of(nd, struct metric_event, nd);
/*
 * rblist comparison callback: orders metric_event nodes by the raw pointer
 * value of their evsel (equality means same evsel).
 */
53 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
55 struct metric_event *a = container_of(rb_node,
58 const struct metric_event *b = entry;
60 if (a->evsel == b->evsel)
/* Pointer-order comparison; casts avoid comparing incomplete types. */
62 if ((char *)a->evsel < (char *)b->evsel)
/*
 * rblist node-allocation callback: heap-copies the stack-allocated key
 * entry and initialises its expression list head.
 * NOTE(review): the NULL-check on malloc and the return are elided here.
 */
67 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
70 struct metric_event *me = malloc(sizeof(struct metric_event));
74 memcpy(me, entry, sizeof(struct metric_event));
75 me->evsel = ((struct metric_event *)entry)->evsel;
76 INIT_LIST_HEAD(&me->head);
/*
 * rblist node-deletion callback: frees every metric_expr hanging off the
 * node's list (including its owned metric_refs/metric_events arrays).
 */
80 static void metric_event_delete(struct rblist *rblist __maybe_unused,
81 struct rb_node *rb_node)
83 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
84 struct metric_expr *expr, *tmp;
/* _safe variant: entries are freed while walking the list. */
86 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
87 free(expr->metric_refs);
88 free(expr->metric_events);
/* Initialise the metric_events rblist and wire up its three callbacks. */
95 static void metricgroup__rblist_init(struct rblist *metric_events)
97 rblist__init(metric_events);
98 metric_events->node_cmp = metric_event_cmp;
99 metric_events->node_new = metric_event_new;
100 metric_events->node_delete = metric_event_delete;
/* Tear down the rblist; node_delete frees each node's expressions. */
103 void metricgroup__rblist_exit(struct rblist *metric_events)
105 rblist__exit(metric_events);
109 * A node in the list of referenced metrics. metric_expr
110 * is held as a convenience to avoid a search through the metric list.
113 struct metric_ref_node {
114 const char *metric_name;
115 const char *metric_expr;
116 struct list_head list;
/* Fields of the metric struct (its opening declaration is elided above). */
121 struct expr_parse_ctx pctx;
122 const char *metric_name;
123 const char *metric_expr;
/* Unit string taken verbatim from the pmu_event, may be NULL. */
124 const char *metric_unit;
/* List of metric_ref_node for metrics referenced by this expression. */
125 struct list_head metric_refs;
/* Static pool of expr_id entries bounding metric-reference nesting depth. */
131 #define RECURSION_ID_MAX 1000
134 struct expr_id id[RECURSION_ID_MAX];
/*
 * Hand out the next expr_id slot from the static pool; presumably returns
 * NULL when the pool is exhausted (the failure return is elided here).
 */
138 static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
140 if (ids->cnt >= RECURSION_ID_MAX)
142 return &ids->id[ids->cnt++];
/* Release every allocated slot (the per-slot free is elided here). */
145 static void expr_ids__exit(struct expr_ids *ids)
149 for (i = 0; i < ids->cnt; i++)
/*
 * Return true if an evsel named event_name is already present in the first
 * num_events entries of metric_events (linear scan by name).
 */
153 static bool contains_event(struct evsel **metric_events, int num_events,
154 const char *event_name)
158 for (i = 0; i < num_events; i++) {
159 if (!strcmp(metric_events[i]->name, event_name))
/*
 * Whether two evsels come from the same named PMU; the result when either
 * pmu_name is NULL is decided by the elided branch below the check.
 */
165 static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
167 if (!ev1->pmu_name || !ev2->pmu_name)
170 return !strcmp(ev1->pmu_name, ev2->pmu_name);
174 * Find a group of events in perf_evlist that correspond to those from a parsed
175 * metric expression. Note, as find_evsel_group is called in the same order as
176 * perf_evlist was constructed, metric_no_merge doesn't need to test for
177 * underfilling a group.
178 * @perf_evlist: a list of events something like: {metric1 leader, metric1
179 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
180 * metric2 sibling}:W,duration_time
181 * @pctx: the parse context for the metric expression.
182 * @metric_no_merge: don't attempt to share events for the metric with other
 * metrics.
184 * @has_constraint: is there a constraint on the group of events? In which case
185 * the events won't be grouped.
186 * @metric_events: out argument, null terminated array of evsel's associated
188 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
189 * @return the first metric event or NULL on failure.
191 static struct evsel *find_evsel_group(struct evlist *perf_evlist,
192 struct expr_parse_ctx *pctx,
193 bool metric_no_merge,
195 struct evsel **metric_events,
196 unsigned long *evlist_used)
198 struct evsel *ev, *current_leader = NULL;
199 struct expr_id_data *val_ptr;
200 int i = 0, matched_events = 0, events_to_match;
/* Number of distinct event ids the metric expression references. */
201 const int idnum = (int)hashmap__size(&pctx->ids);
204 * duration_time is always grouped separately, when events are grouped
205 * (ie has_constraint is false) then ignore it in the matching loop and
206 * add it to metric_events at the end.
208 if (!has_constraint &&
209 hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
210 events_to_match = idnum - 1;
212 events_to_match = idnum;
214 evlist__for_each_entry (perf_evlist, ev) {
216 * Events with a constraint aren't grouped and match the first
219 if (has_constraint && ev->weak_group)
221 /* Ignore event if already used and merging is disabled. */
222 if (metric_no_merge && test_bit(ev->idx, evlist_used))
224 if (!has_constraint && ev->leader != current_leader) {
226 * Start of a new group, discard the whole match and
230 memset(metric_events, 0,
231 sizeof(struct evsel *) * idnum);
232 current_leader = ev->leader;
235 * Check for duplicate events with the same name. For example,
236 * uncore_imc/cas_count_read/ will turn into 6 events per socket
237 * on skylakex. Only the first such event is placed in
238 * metric_events. If events aren't grouped then this also
239 * ensures that the same event in different sibling groups
240 * aren't both added to metric_events.
242 if (contains_event(metric_events, matched_events, ev->name))
244 /* Does this event belong to the parse context? */
245 if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
246 metric_events[matched_events++] = ev;
248 if (matched_events == events_to_match)
252 if (events_to_match != idnum) {
253 /* Add the first duration_time. */
254 evlist__for_each_entry(perf_evlist, ev) {
255 if (!strcmp(ev->name, "duration_time")) {
256 metric_events[matched_events++] = ev;
262 if (matched_events != idnum) {
263 /* Not a whole match */
/* NULL-terminate so callers can iterate without knowing idnum. */
267 metric_events[idnum] = NULL;
269 for (i = 0; i < idnum; i++) {
270 ev = metric_events[i];
271 /* Don't free the used events. */
272 set_bit(ev->idx, evlist_used);
274 * The metric leader points to the identically named event in
277 ev->metric_leader = ev;
279 * Mark two events with identical names in the same group (or
280 * globally) as being in use as uncore events may be duplicated
281 * for each pmu. Set the metric leader of such events to be the
282 * event that appears in metric_events.
284 evlist__for_each_entry_continue(perf_evlist, ev) {
286 * If events are grouped then the search can terminate
287 * when the group is left.
289 if (!has_constraint &&
290 ev->leader != metric_events[i]->leader &&
291 evsel_same_pmu(ev->leader, metric_events[i]->leader))
293 if (!strcmp(metric_events[i]->name, ev->name)) {
294 set_bit(ev->idx, evlist_used);
295 ev->metric_leader = metric_events[i];
300 return metric_events[0];
/*
 * For each parsed metric, find its evsels in perf_evlist, attach a
 * metric_expr to the per-evsel metric_event, then prune evlist entries
 * no metric ended up using. NOTE(review): error paths and several
 * intermediate statements are elided in this excerpt.
 */
303 static int metricgroup__setup_events(struct list_head *groups,
304 bool metric_no_merge,
305 struct evlist *perf_evlist,
306 struct rblist *metric_events_list)
308 struct metric_event *me;
309 struct metric_expr *expr;
313 struct evsel *evsel, *tmp;
/* Bitmap of evlist indices claimed by some metric; sized to the evlist. */
314 unsigned long *evlist_used;
316 evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
320 list_for_each_entry (m, groups, nd) {
321 struct evsel **metric_events;
322 struct metric_ref *metric_refs = NULL;
/* +1 slot for the NULL terminator expected by consumers. */
324 metric_events = calloc(sizeof(void *),
325 hashmap__size(&m->pctx.ids) + 1);
326 if (!metric_events) {
330 evsel = find_evsel_group(perf_evlist, &m->pctx,
332 m->has_constraint, metric_events,
335 pr_debug("Cannot resolve %s: %s\n",
336 m->metric_name, m->metric_expr);
340 for (i = 0; metric_events[i]; i++)
341 metric_events[i]->collect_stat = true;
342 me = metricgroup__lookup(metric_events_list, evsel, true);
348 expr = malloc(sizeof(struct metric_expr));
356 * Collect and store collected nested expressions
357 * for metric processing.
359 if (m->metric_refs_cnt) {
360 struct metric_ref_node *ref;
/* +1 entry: zeroed terminator marks the end of the array. */
362 metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
371 list_for_each_entry(ref, &m->metric_refs, list) {
373 * Intentionally passing just const char pointers,
374 * originally from 'struct pmu_event' object.
375 * We don't need to change them, so there's no
376 * need to create our own copy.
378 metric_refs[i].metric_name = ref->metric_name;
379 metric_refs[i].metric_expr = ref->metric_expr;
384 expr->metric_refs = metric_refs;
385 expr->metric_expr = m->metric_expr;
386 expr->metric_name = m->metric_name;
387 expr->metric_unit = m->metric_unit;
388 expr->metric_events = metric_events;
389 expr->runtime = m->runtime;
390 list_add(&expr->nd, &me->head);
/* Remove and free evsels that no metric claimed. */
393 evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
394 if (!test_bit(evsel->idx, evlist_used)) {
395 evlist__remove(perf_evlist, evsel);
396 evsel__delete(evsel);
399 bitmap_free(evlist_used);
/*
 * Case-insensitive substring match of the user's list selector against a
 * metric name/group string 'n'; "all" matches everything and a NULL 'n'
 * (elided branch) matches "No_group". Hit must fall on a ';'/' ' boundary.
 */
404 static bool match_metric(const char *n, const char *list)
411 if (!strcmp(list, "all"))
414 return !strcasecmp(list, "No_group");
416 m = strcasestr(n, list);
/* Accept only whole-token matches within the ';'-separated string. */
419 if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
420 (m[len] == 0 || m[len] == ';'))
/* Does 'metric' select this pmu_event by group name or by metric name? */
425 static bool match_pe_metric(struct pmu_event *pe, const char *metric)
427 return match_metric(pe->metric_group, metric) ||
428 match_metric(pe->metric_name, metric);
/* Field of struct mep (metric-group printing node; header elided). */
434 struct strlist *metrics;
/* rblist comparison callback: order mep nodes by group name. */
437 static int mep_cmp(struct rb_node *rb_node, const void *entry)
439 struct mep *a = container_of(rb_node, struct mep, nd);
440 struct mep *b = (struct mep *)entry;
442 return strcmp(a->name, b->name);
/*
 * rblist node-allocation callback: heap-copy the key, duplicate its name
 * and create an empty strlist of metrics. NULL checks are elided here.
 */
445 static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
448 struct mep *me = malloc(sizeof(struct mep));
452 memcpy(me, entry, sizeof(struct mep));
453 me->name = strdup(me->name);
456 me->metrics = strlist__new(NULL, NULL);
/* Find-or-insert a mep node by group name (same pattern as
 * metricgroup__lookup: add on miss, then re-find to return the node). */
467 static struct mep *mep_lookup(struct rblist *groups, const char *name)
473 nd = rblist__find(groups, &me);
475 return container_of(nd, struct mep, nd);
476 rblist__add_node(groups, &me);
477 nd = rblist__find(groups, &me);
479 return container_of(nd, struct mep, nd);
/* rblist node-deletion callback: release the node's metric strlist
 * (the strdup'd name free is elided here). */
483 static void mep_delete(struct rblist *rl __maybe_unused,
486 struct mep *me = container_of(nd, struct mep, nd);
488 strlist__delete(me->metrics);
/* Print metric names: space-separated on one line when raw, otherwise one
 * indented entry per line. */
493 static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
498 strlist__for_each_entry (sn, metrics) {
500 printf("%s%s", n > 0 ? " " : "", sn->s);
502 printf(" %s\n", sn->s);
/*
 * Format one pmu_event for 'perf list': build its display string
 * (optionally with description/expression when details is set) and add it
 * either to the flat metric list or to each of its ';'-separated groups.
 * NOTE(review): strdup of the group string and cleanup are elided here.
 */
509 static int metricgroup__print_pmu_event(struct pmu_event *pe,
510 bool metricgroups, char *filter,
511 bool raw, bool details,
512 struct rblist *groups,
513 struct strlist *metriclist)
518 g = pe->metric_group;
/* Events without a group fall back to a default group (elided). */
519 if (!g && pe->metric_name) {
533 while ((g = strsep(&mg, ";")) != NULL) {
540 if (filter && !strstr(g, filter))
543 s = (char *)pe->metric_name;
545 if (asprintf(&s, "%s\n%*s%s]",
546 pe->metric_name, 8, "[", pe->desc) < 0)
549 if (asprintf(&s, "%s\n%*s%s]",
550 s, 8, "[", pe->metric_expr) < 0)
559 strlist__add(metriclist, s);
561 me = mep_lookup(groups, g);
564 strlist__add(me->metrics, s);
/* Bundles the arguments of metricgroup__print_pmu_event for use as the
 * opaque payload of a sys-event iteration callback. */
575 struct metricgroup_print_sys_idata {
576 struct strlist *metriclist;
578 struct rblist *groups;
/* Callback signature used when walking system (uncore) pmu events. */
584 typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);
586 struct metricgroup_iter_data {
587 metricgroup_sys_event_iter_fn fn;
/*
 * Generic sys-event iterator: skip events without an expression or compat
 * string, then invoke the wrapped callback once a PMU whose id matches
 * pe->compat is found.
 */
591 static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
593 struct metricgroup_iter_data *d = data;
594 struct perf_pmu *pmu = NULL;
596 if (!pe->metric_expr || !pe->compat)
/* Scan all PMUs for one whose id matches the event's compat string. */
599 while ((pmu = perf_pmu__scan(pmu))) {
601 if (!pmu->id || strcmp(pmu->id, pe->compat))
604 return d->fn(pe, d->data);
/* Adapter: unpack the idata payload and forward to the printer. */
610 static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
612 struct metricgroup_print_sys_idata *d = data;
614 return metricgroup__print_pmu_event(pe, d->metricgroups, d->filter, d->raw,
615 d->details, d->groups, d->metriclist);
/*
 * Entry point for 'perf list' metric output: collect metrics from the CPU
 * pmu-events map and from system PMU events into per-group rblist nodes,
 * then print either the groups or the flat metric list.
 */
618 void metricgroup__print(bool metrics, bool metricgroups, char *filter,
619 bool raw, bool details)
621 struct pmu_events_map *map = pmu_events_map__find();
622 struct pmu_event *pe;
624 struct rblist groups;
625 struct rb_node *node, *next;
626 struct strlist *metriclist = NULL;
629 metriclist = strlist__new(NULL, NULL);
634 rblist__init(&groups);
635 groups.node_new = mep_new;
636 groups.node_cmp = mep_cmp;
637 groups.node_delete = mep_delete;
/* Walk the CPU map's event table (terminator test elided per line). */
638 for (i = 0; map; i++) {
641 if (!pe->name && !pe->metric_group && !pe->metric_name)
643 if (!pe->metric_expr)
645 if (metricgroup__print_pmu_event(pe, metricgroups, filter,
646 raw, details, &groups,
/* Also collect metrics defined on system (uncore) PMUs. */
652 struct metricgroup_iter_data data = {
653 .fn = metricgroup__print_sys_event_iter,
654 .data = (void *) &(struct metricgroup_print_sys_idata){
655 .metriclist = metriclist,
656 .metricgroups = metricgroups,
664 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
667 if (!filter || !rblist__empty(&groups)) {
668 if (metricgroups && !raw)
669 printf("\nMetric Groups:\n\n");
670 else if (metrics && !raw)
671 printf("\nMetrics:\n\n");
/* Print and drain the group rblist in sorted order. */
674 for (node = rb_first_cached(&groups.entries); node; node = next) {
675 struct mep *me = container_of(node, struct mep, nd);
678 printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
680 metricgroup__print_strlist(me->metrics, raw);
681 next = rb_next(node);
682 rblist__remove_node(&groups, node);
685 metricgroup__print_strlist(metriclist, raw);
686 strlist__delete(metriclist);
/*
 * Emit the metric's events as a weak group "{e1,e2,...}:W", keeping
 * duration_time outside the braces since grouping it with hardware events
 * can stop the group from counting.
 */
689 static void metricgroup__add_metric_weak_group(struct strbuf *events,
690 struct expr_parse_ctx *ctx)
692 struct hashmap_entry *cur;
694 bool no_group = true, has_duration = false;
696 hashmap__for_each_entry((&ctx->ids), cur, bkt) {
697 pr_debug("found event %s\n", (const char *)cur->key);
699 * Duration time maps to a software event and can make
700 * groups not count. Always use it outside a
703 if (!strcmp(cur->key, "duration_time")) {
/* First event opens the brace; later ones are comma-separated. */
707 strbuf_addf(events, "%s%s",
708 no_group ? "{" : ",",
709 (const char *)cur->key);
713 strbuf_addf(events, "}:W");
715 strbuf_addf(events, ",duration_time");
716 } else if (has_duration)
717 strbuf_addf(events, "duration_time");
/* Emit the metric's events comma-separated with no grouping braces
 * (used when the metric carries a constraint). */
720 static void metricgroup__add_metric_non_group(struct strbuf *events,
721 struct expr_parse_ctx *ctx)
723 struct hashmap_entry *cur;
727 hashmap__for_each_entry((&ctx->ids), cur, bkt) {
729 strbuf_addf(events, ",");
730 strbuf_addf(events, "%s", (const char *)cur->key);
/*
 * Warn when a metric group is split due to the NMI-watchdog constraint.
 * Called per metric (foot == false) to record the violation, and once at
 * the end (foot == true) to print the how-to-disable footer.
 */
735 static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
/* Sticky across calls so the footer prints only if a split happened. */
737 static bool violate_nmi_constraint;
740 pr_warning("Splitting metric group %s into standalone metrics.\n", name);
741 violate_nmi_constraint = true;
745 if (!violate_nmi_constraint)
748 pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
749 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
751 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
/*
 * True if the metric must not be grouped: currently only NO_NMI_WATCHDOG
 * while the NMI watchdog is enabled; also emits the user hint.
 */
754 static bool metricgroup__has_constraint(struct pmu_event *pe)
756 if (!pe->metric_constraint)
759 if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
760 sysctl__nmi_watchdog_enabled()) {
761 metricgroup___watchdog_constraint_hint(pe->metric_name, false);
/* Weak default for the per-arch runtime-parameter count used by '?'
 * metrics; the default return value is elided in this excerpt. */
768 int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
/* Payload for the sys-event add-metric iteration callback. */
773 struct metricgroup_add_iter_data {
774 struct list_head *metric_list;
776 struct expr_ids *ids;
779 bool metric_no_group;
/*
 * Add one metric (or one referenced sub-metric) to metric_list. When *m is
 * NULL this is the parent call: allocate the metric, record its fields and
 * allocate a parent expr_id. Otherwise it is a recursive call for a
 * referenced metric: append a metric_ref_node to the existing parent.
 * NOTE(review): error-handling lines are elided throughout this excerpt.
 */
782 static int __add_metric(struct list_head *metric_list,
783 struct pmu_event *pe,
784 bool metric_no_group,
787 struct expr_id *parent,
788 struct expr_ids *ids)
790 struct metric_ref_node *ref;
795 * We got in here for the parent group,
796 * allocate it and put it on the list.
798 m = zalloc(sizeof(*m));
802 expr__ctx_init(&m->pctx);
803 m->metric_name = pe->metric_name;
804 m->metric_expr = pe->metric_expr;
805 m->metric_unit = pe->unit;
806 m->runtime = runtime;
807 m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
808 INIT_LIST_HEAD(&m->metric_refs);
809 m->metric_refs_cnt = 0;
/* Parent id comes from the bounded static pool. */
811 parent = expr_ids__alloc(ids);
817 parent->id = strdup(pe->metric_name);
825 * We got here for the referenced metric, via the
826 * recursive metricgroup__add_metric call, add
827 * it to the parent group.
831 ref = malloc(sizeof(*ref));
836 * Intentionally passing just const char pointers,
837 * from 'pe' object, so they never go away. We don't
838 * need to change them, so there's no need to create
841 ref->metric_name = pe->metric_name;
842 ref->metric_expr = pe->metric_expr;
844 list_add(&ref->list, &m->metric_refs);
845 m->metric_refs_cnt++;
848 /* Force all found IDs in metric to have us as parent ID. */
849 WARN_ON_ONCE(!parent);
850 m->pctx.parent = parent;
853 * For both the parent and referenced metrics, we parse
854 * all the metric's IDs and add it to the parent context.
856 if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
/* Only free the context for the parent; refs share the parent's. */
857 if (m->metric_refs_cnt == 0) {
858 expr__ctx_clear(&m->pctx);
866 * We add new group only in the 'parent' call,
867 * so bail out for referenced metric case.
869 if (m->metric_refs_cnt)
872 if (list_empty(metric_list))
873 list_add(&m->nd, metric_list);
875 struct list_head *pos;
877 /* Place the largest groups at the front. */
878 list_for_each_prev(pos, metric_list) {
879 struct metric *old = list_entry(pos, struct metric, nd);
881 if (hashmap__size(&m->pctx.ids) <=
882 hashmap__size(&old->pctx.ids))
885 list_add(&m->nd, pos);
/* Iterate the map's event table until the all-NULL terminator entry. */
891 #define map_for_each_event(__pe, __idx, __map) \
893 for (__idx = 0, __pe = &__map->table[__idx]; \
894 __pe->name || __pe->metric_group || __pe->metric_name; \
895 __pe = &__map->table[++__idx])
/* As above, but only entries with an expression matching __metric. */
897 #define map_for_each_metric(__pe, __idx, __map, __metric) \
898 map_for_each_event(__pe, __idx, __map) \
899 if (__pe->metric_expr && \
900 (match_metric(__pe->metric_group, __metric) || \
901 match_metric(__pe->metric_name, __metric)))
/* Return the first pmu_event in 'map' whose metric name matches 'metric',
 * or NULL (elided) when none does. */
903 struct pmu_event *metricgroup__find_metric(const char *metric,
904 struct pmu_events_map *map)
906 struct pmu_event *pe;
909 map_for_each_event(pe, i, map) {
910 if (match_metric(pe->metric_name, metric))
/*
 * Detect recursive metric references: walk the parent chain of 'id' and
 * fail if 'id' already appears there. Otherwise allocate a new expr_id
 * (stored via *parent, per the elided lines) linked to the current parent.
 */
917 static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
918 struct expr_ids *ids)
920 struct expr_id_data *data;
925 * We get the parent referenced by 'id' argument and
926 * traverse through all the parent object IDs to check
927 * if we already processed 'id', if we did, it's recursion
930 ret = expr__get_id(&m->pctx, id, &data);
934 p = expr_id_data__parent(data);
937 if (!strcmp(p->id, id)) {
938 pr_err("failed: recursion detected for %s\n", id);
945 * If we are over the limit of static entries, the metric
946 * is too difficult/nested to process, fail as well.
948 p = expr_ids__alloc(ids);
950 pr_err("failed: too many nested metrics\n");
955 p->parent = expr_id_data__parent(data);
/* p->id is a strdup result (elided); NULL signals allocation failure. */
958 return p->id ? 0 : -ENOMEM;
/* Forward declaration: __resolve_metric and add_metric are mutually
 * recursive through referenced metrics. */
961 static int add_metric(struct list_head *metric_list,
962 struct pmu_event *pe,
963 bool metric_no_group,
965 struct expr_id *parent,
966 struct expr_ids *ids);
/*
 * Resolve ids in a metric's parse context that are themselves metrics:
 * check for recursion, remove the metric-name id, and recursively add the
 * referenced metric's events in its place. Restarts iteration after each
 * substitution since the hashmap was mutated.
 */
968 static int __resolve_metric(struct metric *m,
969 bool metric_no_group,
970 struct list_head *metric_list,
971 struct pmu_events_map *map,
972 struct expr_ids *ids)
974 struct hashmap_entry *cur;
980 * Iterate all the parsed IDs and if there's metric,
981 * add it to the context.
985 hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
986 struct expr_id *parent;
987 struct pmu_event *pe;
/* Non-metric ids (plain events) are skipped via the elided branch. */
989 pe = metricgroup__find_metric(cur->key, map);
993 ret = recursion_check(m, cur->key, &parent, ids);
998 /* The metric key itself needs to go out.. */
999 expr__del_id(&m->pctx, cur->key);
1001 /* ... and it gets resolved to the parent context. */
1002 ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
1007 * We added new metric to hashmap, so we need
1008 * to break the iteration and start over.
/* Run __resolve_metric over every metric currently on the list. */
1017 static int resolve_metric(bool metric_no_group,
1018 struct list_head *metric_list,
1019 struct pmu_events_map *map,
1020 struct expr_ids *ids)
1025 list_for_each_entry(m, metric_list, nd) {
1026 err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
/*
 * Add a metric, expanding '?' runtime parameters: a plain expression is
 * added once; an expression containing '?' is instantiated once per value
 * returned by arch_get_runtimeparam().
 */
1033 static int add_metric(struct list_head *metric_list,
1034 struct pmu_event *pe,
1035 bool metric_no_group,
1037 struct expr_id *parent,
1038 struct expr_ids *ids)
/* Remember *m so each '?'-instantiation restarts from the same parent. */
1040 struct metric *orig = *m;
1043 pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
1045 if (!strstr(pe->metric_expr, "?")) {
1046 ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
1050 count = arch_get_runtimeparam(pe);
1052 /* This loop is added to create multiple
1053 * events depend on count value and add
1054 * those events to metric_list.
1057 for (j = 0; j < count && !ret; j++, *m = orig)
1058 ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
/*
 * Sys-event iteration callback: if the event matches the requested metric,
 * add it, resolve any referenced metrics (NULL map: sys events only), and
 * record that at least one match was found.
 */
1064 static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
1067 struct metricgroup_add_iter_data *d = data;
1068 struct metric *m = NULL;
1071 if (!match_pe_metric(pe, d->metric))
1074 ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
1078 ret = resolve_metric(d->metric_no_group,
1079 d->metric_list, NULL, d->ids);
1083 *(d->has_match) = true;
/*
 * Resolve one metric/group selector: collect matching metrics from the CPU
 * map and from system PMU events, resolve nested references, then append
 * each metric's events to the strbuf (grouped weakly unless constrained).
 * Collected metrics are spliced onto metric_list even on failure so the
 * caller can free them.
 */
1088 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
1089 struct strbuf *events,
1090 struct list_head *metric_list,
1091 struct pmu_events_map *map)
1093 struct expr_ids ids = { .cnt = 0, };
1094 struct pmu_event *pe;
1098 bool has_match = false;
1100 map_for_each_metric(pe, i, map, metric) {
1104 ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
1109 * Process any possible referenced metrics
1110 * included in the expression.
1112 ret = resolve_metric(metric_no_group,
/* Also search system (uncore) PMU events for the selector. */
1119 struct metricgroup_iter_data data = {
1120 .fn = metricgroup__add_metric_sys_event_iter,
1121 .data = (void *) &(struct metricgroup_add_iter_data) {
1122 .metric_list = &list,
1124 .metric_no_group = metric_no_group,
1126 .has_match = &has_match,
1131 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
1133 /* End of pmu events. */
1139 list_for_each_entry(m, &list, nd) {
1140 if (events->len > 0)
1141 strbuf_addf(events, ",");
1143 if (m->has_constraint) {
1144 metricgroup__add_metric_non_group(events,
1147 metricgroup__add_metric_weak_group(events,
1154 * add to metric_list so that they can be released
1155 * even if it's failed
1157 list_splice(&list, metric_list);
1158 expr_ids__exit(&ids);
/*
 * Split the user's comma-separated metric list and add each entry, then
 * emit the NMI-watchdog hint footer if any constraint was violated.
 * NOTE(review): nlist is a strdup of 'list' so strsep can mutate it; the
 * free and llist initialisation are elided in this excerpt.
 */
1162 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1163 struct strbuf *events,
1164 struct list_head *metric_list,
1165 struct pmu_events_map *map)
1167 char *llist, *nlist, *p;
1170 nlist = strdup(list);
1175 strbuf_init(events, 100);
1176 strbuf_addf(events, "%s", "");
1178 while ((p = strsep(&llist, ",")) != NULL) {
1179 ret = metricgroup__add_metric(p, metric_no_group, events,
1181 if (ret == -EINVAL) {
1182 fprintf(stderr, "Cannot find metric or group `%s'\n",
/* foot == true: print the watchdog footer once, if needed. */
1190 metricgroup___watchdog_constraint_hint(NULL, true);
/* Unlink and free (elided) every referenced-metric node of a metric. */
1195 static void metric__free_refs(struct metric *metric)
1197 struct metric_ref_node *ref, *tmp;
1199 list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
1200 list_del(&ref->list);
/* Free every metric on the list: refs, parse context, then the node. */
1205 static void metricgroup__free_metrics(struct list_head *metric_list)
1207 struct metric *m, *tmp;
1209 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1210 metric__free_refs(m);
1211 expr__ctx_clear(&m->pctx);
1212 list_del_init(&m->nd);
/*
 * Core driver: translate a metric selector string into an events string,
 * parse those events into perf_evlist (optionally against a fake PMU for
 * testing), then bind evsels to metrics via metricgroup__setup_events.
 * Cleanup of the metric list and strbuf runs on all paths.
 */
1217 static int parse_groups(struct evlist *perf_evlist, const char *str,
1218 bool metric_no_group,
1219 bool metric_no_merge,
1220 struct perf_pmu *fake_pmu,
1221 struct rblist *metric_events,
1222 struct pmu_events_map *map)
1224 struct parse_events_error parse_error;
1225 struct strbuf extra_events;
1226 LIST_HEAD(metric_list);
/* Lazily install the rblist callbacks on first use. */
1229 if (metric_events->nr_entries == 0)
1230 metricgroup__rblist_init(metric_events);
1231 ret = metricgroup__add_metric_list(str, metric_no_group,
1232 &extra_events, &metric_list, map);
1235 pr_debug("adding %s\n", extra_events.buf);
1236 bzero(&parse_error, sizeof(parse_error));
1237 ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
1239 parse_events_print_error(&parse_error, extra_events.buf);
1242 ret = metricgroup__setup_events(&metric_list, metric_no_merge,
1243 perf_evlist, metric_events);
1245 metricgroup__free_metrics(&metric_list);
1246 strbuf_release(&extra_events);
/* Option-callback entry point: use the real CPU pmu-events map and no
 * fake PMU. */
1250 int metricgroup__parse_groups(const struct option *opt,
1252 bool metric_no_group,
1253 bool metric_no_merge,
1254 struct rblist *metric_events)
1256 struct evlist *perf_evlist = *(struct evlist **)opt->value;
1257 struct pmu_events_map *map = pmu_events_map__find();
1259 return parse_groups(perf_evlist, str, metric_no_group,
1260 metric_no_merge, NULL, metric_events, map);
/* Test entry point: caller supplies the map; events parse against the
 * fake PMU so no real hardware is needed. */
1263 int metricgroup__parse_groups_test(struct evlist *evlist,
1264 struct pmu_events_map *map,
1266 bool metric_no_group,
1267 bool metric_no_merge,
1268 struct rblist *metric_events)
1270 return parse_groups(evlist, str, metric_no_group,
1271 metric_no_merge, &perf_pmu__fake, metric_events, map);
/* Does the current CPU's pmu-events map define a metric named 'metric'?
 * Scans the table to its all-NULL terminator entry. */
1274 bool metricgroup__has_metric(const char *metric)
1276 struct pmu_events_map *map = pmu_events_map__find();
1277 struct pmu_event *pe;
1283 for (i = 0; ; i++) {
1284 pe = &map->table[i];
1286 if (!pe->name && !pe->metric_group && !pe->metric_name)
1288 if (!pe->metric_expr)
1290 if (match_metric(pe->metric_name, metric))
/*
 * Duplicate an rblist of metric events for a new evlist (used for cgroup
 * event duplication): each old evsel is mapped to the evsel at the same
 * idx in the new evlist and each metric_expr is deep-copied (refs array
 * and NULL-terminated events array).
 */
1296 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1297 struct rblist *new_metric_events,
1298 struct rblist *old_metric_events)
1302 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1304 struct metric_event *old_me, *new_me;
1305 struct metric_expr *old_expr, *new_expr;
1306 struct evsel *evsel;
1310 nd = rblist__entry(old_metric_events, i);
1311 old_me = container_of(nd, struct metric_event, nd);
/* Map by index: the new evlist mirrors the old one's ordering. */
1313 evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
1316 new_me = metricgroup__lookup(new_metric_events, evsel, true);
1320 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1321 cgrp ? cgrp->name : "root", evsel->name, evsel->idx);
1323 list_for_each_entry(old_expr, &old_me->head, nd) {
1324 new_expr = malloc(sizeof(*new_expr));
/* Strings are shared const pointers; no copy needed. */
1328 new_expr->metric_expr = old_expr->metric_expr;
1329 new_expr->metric_name = old_expr->metric_name;
1330 new_expr->metric_unit = old_expr->metric_unit;
1331 new_expr->runtime = old_expr->runtime;
1333 if (old_expr->metric_refs) {
1334 /* calculate number of metric_events */
1335 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1337 alloc_size = sizeof(*new_expr->metric_refs);
1338 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1339 if (!new_expr->metric_refs) {
1344 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1347 new_expr->metric_refs = NULL;
1350 /* calculate number of metric_events */
1351 for (nr = 0; old_expr->metric_events[nr]; nr++)
1353 alloc_size = sizeof(*new_expr->metric_events);
1354 new_expr->metric_events = calloc(nr + 1, alloc_size);
1355 if (!new_expr->metric_events) {
1356 free(new_expr->metric_refs);
1361 /* copy evsel in the same position */
1362 for (idx = 0; idx < nr; idx++) {
1363 evsel = old_expr->metric_events[idx];
1364 evsel = evlist__find_evsel(evlist, evsel->idx);
1365 if (evsel == NULL) {
1366 free(new_expr->metric_events);
1367 free(new_expr->metric_refs);
1371 new_expr->metric_events[idx] = evsel;
1374 list_add(&new_expr->nd, &new_me->head);